1 /* Convert tree expression to rtl instructions, for GNU compiler.
2 Copyright (C) 1988-2021 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
22 #include "coretypes.h"
38 #include "diagnostic.h"
40 #include "fold-const.h"
41 #include "stor-layout.h"
45 #include "insn-attr.h"
50 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
52 #include "optabs-tree.h"
55 #include "langhooks.h"
56 #include "common/common-target.h"
58 #include "tree-ssa-live.h"
59 #include "tree-outof-ssa.h"
60 #include "tree-ssa-address.h"
63 #include "gimple-fold.h"
64 #include "rtx-vector-builder.h"
65 #include "tree-pretty-print.h"
69 /* If this is nonzero, we do not bother generating VOLATILE
70 around volatile memory references, and we are willing to
71 output indirect addresses. If cse is to follow, we reject
72 indirect addresses so a useful potential cse is generated;
73 if it is used only once, instruction combination will produce
74 the same indirect address eventually. */
77 static bool block_move_libcall_safe_for_call_parm (void);
78 static bool emit_block_move_via_pattern (rtx
, rtx
, rtx
, unsigned, unsigned,
79 HOST_WIDE_INT
, unsigned HOST_WIDE_INT
,
80 unsigned HOST_WIDE_INT
,
81 unsigned HOST_WIDE_INT
, bool);
82 static void emit_block_move_via_loop (rtx
, rtx
, rtx
, unsigned);
83 static void clear_by_pieces (rtx
, unsigned HOST_WIDE_INT
, unsigned int);
84 static rtx_insn
*compress_float_constant (rtx
, rtx
);
85 static rtx
get_subtarget (rtx
);
86 static void store_constructor (tree
, rtx
, int, poly_int64
, bool);
87 static rtx
store_field (rtx
, poly_int64
, poly_int64
, poly_uint64
, poly_uint64
,
88 machine_mode
, tree
, alias_set_type
, bool, bool);
90 static unsigned HOST_WIDE_INT
highest_pow2_factor_for_target (const_tree
, const_tree
);
92 static int is_aligning_offset (const_tree
, const_tree
);
93 static rtx
reduce_to_bit_field_precision (rtx
, rtx
, tree
);
94 static rtx
do_store_flag (sepops
, rtx
, machine_mode
);
96 static void emit_single_push_insn (machine_mode
, rtx
, tree
);
98 static void do_tablejump (rtx
, machine_mode
, rtx
, rtx
, rtx
,
100 static rtx
const_vector_from_tree (tree
);
101 static tree
tree_expr_size (const_tree
);
102 static HOST_WIDE_INT
int_expr_size (tree
);
103 static void convert_mode_scalar (rtx
, rtx
, int);
106 /* This is run to set up which modes can be used
107 directly in memory and to initialize the block move optab. It is run
108 at the beginning of compilation and when the target is reinitialized. */
111 init_expr_target (void)
118 /* Try indexing by frame ptr and try by stack ptr.
119 It is known that on the Convex the stack ptr isn't a valid index.
120 With luck, one or the other is valid on any machine. */
121 mem
= gen_rtx_MEM (word_mode
, stack_pointer_rtx
);
122 mem1
= gen_rtx_MEM (word_mode
, frame_pointer_rtx
);
124 /* A scratch register we can modify in-place below to avoid
125 useless RTL allocations. */
126 reg
= gen_rtx_REG (word_mode
, LAST_VIRTUAL_REGISTER
+ 1);
128 rtx_insn
*insn
= as_a
<rtx_insn
*> (rtx_alloc (INSN
));
129 pat
= gen_rtx_SET (NULL_RTX
, NULL_RTX
);
130 PATTERN (insn
) = pat
;
132 for (machine_mode mode
= VOIDmode
; (int) mode
< NUM_MACHINE_MODES
;
133 mode
= (machine_mode
) ((int) mode
+ 1))
137 direct_load
[(int) mode
] = direct_store
[(int) mode
] = 0;
138 PUT_MODE (mem
, mode
);
139 PUT_MODE (mem1
, mode
);
141 /* See if there is some register that can be used in this mode and
142 directly loaded or stored from memory. */
144 if (mode
!= VOIDmode
&& mode
!= BLKmode
)
145 for (regno
= 0; regno
< FIRST_PSEUDO_REGISTER
146 && (direct_load
[(int) mode
] == 0 || direct_store
[(int) mode
] == 0);
149 if (!targetm
.hard_regno_mode_ok (regno
, mode
))
152 set_mode_and_regno (reg
, mode
, regno
);
155 SET_DEST (pat
) = reg
;
156 if (recog (pat
, insn
, &num_clobbers
) >= 0)
157 direct_load
[(int) mode
] = 1;
159 SET_SRC (pat
) = mem1
;
160 SET_DEST (pat
) = reg
;
161 if (recog (pat
, insn
, &num_clobbers
) >= 0)
162 direct_load
[(int) mode
] = 1;
165 SET_DEST (pat
) = mem
;
166 if (recog (pat
, insn
, &num_clobbers
) >= 0)
167 direct_store
[(int) mode
] = 1;
170 SET_DEST (pat
) = mem1
;
171 if (recog (pat
, insn
, &num_clobbers
) >= 0)
172 direct_store
[(int) mode
] = 1;
176 mem
= gen_rtx_MEM (VOIDmode
, gen_raw_REG (Pmode
, LAST_VIRTUAL_REGISTER
+ 1));
178 opt_scalar_float_mode mode_iter
;
179 FOR_EACH_MODE_IN_CLASS (mode_iter
, MODE_FLOAT
)
181 scalar_float_mode mode
= mode_iter
.require ();
182 scalar_float_mode srcmode
;
183 FOR_EACH_MODE_UNTIL (srcmode
, mode
)
187 ic
= can_extend_p (mode
, srcmode
, 0);
188 if (ic
== CODE_FOR_nothing
)
191 PUT_MODE (mem
, srcmode
);
193 if (insn_operand_matches (ic
, 1, mem
))
194 float_extend_from_mem
[mode
][srcmode
] = true;
199 /* This is run at the start of compiling a function. */
204 memset (&crtl
->expr
, 0, sizeof (crtl
->expr
));
207 /* Copy data from FROM to TO, where the machine modes are not the same.
208 Both modes may be integer, or both may be floating, or both may be
210 UNSIGNEDP should be nonzero if FROM is an unsigned type.
211 This causes zero-extension instead of sign-extension. */
214 convert_move (rtx to
, rtx from
, int unsignedp
)
216 machine_mode to_mode
= GET_MODE (to
);
217 machine_mode from_mode
= GET_MODE (from
);
219 gcc_assert (to_mode
!= BLKmode
);
220 gcc_assert (from_mode
!= BLKmode
);
222 /* If the source and destination are already the same, then there's
227 /* If FROM is a SUBREG that indicates that we have already done at least
228 the required extension, strip it. We don't handle such SUBREGs as
231 scalar_int_mode to_int_mode
;
232 if (GET_CODE (from
) == SUBREG
233 && SUBREG_PROMOTED_VAR_P (from
)
234 && is_a
<scalar_int_mode
> (to_mode
, &to_int_mode
)
235 && (GET_MODE_PRECISION (subreg_promoted_mode (from
))
236 >= GET_MODE_PRECISION (to_int_mode
))
237 && SUBREG_CHECK_PROMOTED_SIGN (from
, unsignedp
))
239 from
= gen_lowpart (to_int_mode
, SUBREG_REG (from
));
240 from_mode
= to_int_mode
;
243 gcc_assert (GET_CODE (to
) != SUBREG
|| !SUBREG_PROMOTED_VAR_P (to
));
245 if (to_mode
== from_mode
246 || (from_mode
== VOIDmode
&& CONSTANT_P (from
)))
248 emit_move_insn (to
, from
);
252 if (VECTOR_MODE_P (to_mode
) || VECTOR_MODE_P (from_mode
))
254 if (GET_MODE_UNIT_PRECISION (to_mode
)
255 > GET_MODE_UNIT_PRECISION (from_mode
))
257 optab op
= unsignedp
? zext_optab
: sext_optab
;
258 insn_code icode
= convert_optab_handler (op
, to_mode
, from_mode
);
259 if (icode
!= CODE_FOR_nothing
)
261 emit_unop_insn (icode
, to
, from
,
262 unsignedp
? ZERO_EXTEND
: SIGN_EXTEND
);
267 if (GET_MODE_UNIT_PRECISION (to_mode
)
268 < GET_MODE_UNIT_PRECISION (from_mode
))
270 insn_code icode
= convert_optab_handler (trunc_optab
,
272 if (icode
!= CODE_FOR_nothing
)
274 emit_unop_insn (icode
, to
, from
, TRUNCATE
);
279 gcc_assert (known_eq (GET_MODE_BITSIZE (from_mode
),
280 GET_MODE_BITSIZE (to_mode
)));
282 if (VECTOR_MODE_P (to_mode
))
283 from
= simplify_gen_subreg (to_mode
, from
, GET_MODE (from
), 0);
285 to
= simplify_gen_subreg (from_mode
, to
, GET_MODE (to
), 0);
287 emit_move_insn (to
, from
);
291 if (GET_CODE (to
) == CONCAT
&& GET_CODE (from
) == CONCAT
)
293 convert_move (XEXP (to
, 0), XEXP (from
, 0), unsignedp
);
294 convert_move (XEXP (to
, 1), XEXP (from
, 1), unsignedp
);
298 convert_mode_scalar (to
, from
, unsignedp
);
301 /* Like convert_move, but deals only with scalar modes. */
304 convert_mode_scalar (rtx to
, rtx from
, int unsignedp
)
306 /* Both modes should be scalar types. */
307 scalar_mode from_mode
= as_a
<scalar_mode
> (GET_MODE (from
));
308 scalar_mode to_mode
= as_a
<scalar_mode
> (GET_MODE (to
));
309 bool to_real
= SCALAR_FLOAT_MODE_P (to_mode
);
310 bool from_real
= SCALAR_FLOAT_MODE_P (from_mode
);
314 gcc_assert (to_real
== from_real
);
316 /* rtx code for making an equivalent value. */
317 enum rtx_code equiv_code
= (unsignedp
< 0 ? UNKNOWN
318 : (unsignedp
? ZERO_EXTEND
: SIGN_EXTEND
));
326 gcc_assert ((GET_MODE_PRECISION (from_mode
)
327 != GET_MODE_PRECISION (to_mode
))
328 || (DECIMAL_FLOAT_MODE_P (from_mode
)
329 != DECIMAL_FLOAT_MODE_P (to_mode
)));
331 if (GET_MODE_PRECISION (from_mode
) == GET_MODE_PRECISION (to_mode
))
332 /* Conversion between decimal float and binary float, same size. */
333 tab
= DECIMAL_FLOAT_MODE_P (from_mode
) ? trunc_optab
: sext_optab
;
334 else if (GET_MODE_PRECISION (from_mode
) < GET_MODE_PRECISION (to_mode
))
339 /* Try converting directly if the insn is supported. */
341 code
= convert_optab_handler (tab
, to_mode
, from_mode
);
342 if (code
!= CODE_FOR_nothing
)
344 emit_unop_insn (code
, to
, from
,
345 tab
== sext_optab
? FLOAT_EXTEND
: FLOAT_TRUNCATE
);
349 /* Otherwise use a libcall. */
350 libcall
= convert_optab_libfunc (tab
, to_mode
, from_mode
);
352 /* Is this conversion implemented yet? */
353 gcc_assert (libcall
);
356 value
= emit_library_call_value (libcall
, NULL_RTX
, LCT_CONST
, to_mode
,
358 insns
= get_insns ();
360 emit_libcall_block (insns
, to
, value
,
361 tab
== trunc_optab
? gen_rtx_FLOAT_TRUNCATE (to_mode
,
363 : gen_rtx_FLOAT_EXTEND (to_mode
, from
));
367 /* Handle pointer conversion. */ /* SPEE 900220. */
368 /* If the target has a converter from FROM_MODE to TO_MODE, use it. */
372 if (GET_MODE_PRECISION (from_mode
) > GET_MODE_PRECISION (to_mode
))
379 if (convert_optab_handler (ctab
, to_mode
, from_mode
)
382 emit_unop_insn (convert_optab_handler (ctab
, to_mode
, from_mode
),
388 /* Targets are expected to provide conversion insns between PxImode and
389 xImode for all MODE_PARTIAL_INT modes they use, but no others. */
390 if (GET_MODE_CLASS (to_mode
) == MODE_PARTIAL_INT
)
392 scalar_int_mode full_mode
393 = smallest_int_mode_for_size (GET_MODE_BITSIZE (to_mode
));
395 gcc_assert (convert_optab_handler (trunc_optab
, to_mode
, full_mode
)
396 != CODE_FOR_nothing
);
398 if (full_mode
!= from_mode
)
399 from
= convert_to_mode (full_mode
, from
, unsignedp
);
400 emit_unop_insn (convert_optab_handler (trunc_optab
, to_mode
, full_mode
),
404 if (GET_MODE_CLASS (from_mode
) == MODE_PARTIAL_INT
)
407 scalar_int_mode full_mode
408 = smallest_int_mode_for_size (GET_MODE_BITSIZE (from_mode
));
409 convert_optab ctab
= unsignedp
? zext_optab
: sext_optab
;
410 enum insn_code icode
;
412 icode
= convert_optab_handler (ctab
, full_mode
, from_mode
);
413 gcc_assert (icode
!= CODE_FOR_nothing
);
415 if (to_mode
== full_mode
)
417 emit_unop_insn (icode
, to
, from
, UNKNOWN
);
421 new_from
= gen_reg_rtx (full_mode
);
422 emit_unop_insn (icode
, new_from
, from
, UNKNOWN
);
424 /* else proceed to integer conversions below. */
425 from_mode
= full_mode
;
429 /* Make sure both are fixed-point modes or both are not. */
430 gcc_assert (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode
) ==
431 ALL_SCALAR_FIXED_POINT_MODE_P (to_mode
));
432 if (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode
))
434 /* If we widen from_mode to to_mode and they are in the same class,
435 we won't saturate the result.
436 Otherwise, always saturate the result to play safe. */
437 if (GET_MODE_CLASS (from_mode
) == GET_MODE_CLASS (to_mode
)
438 && GET_MODE_SIZE (from_mode
) < GET_MODE_SIZE (to_mode
))
439 expand_fixed_convert (to
, from
, 0, 0);
441 expand_fixed_convert (to
, from
, 0, 1);
445 /* Now both modes are integers. */
447 /* Handle expanding beyond a word. */
448 if (GET_MODE_PRECISION (from_mode
) < GET_MODE_PRECISION (to_mode
)
449 && GET_MODE_PRECISION (to_mode
) > BITS_PER_WORD
)
456 scalar_mode lowpart_mode
;
457 int nwords
= CEIL (GET_MODE_SIZE (to_mode
), UNITS_PER_WORD
);
459 /* Try converting directly if the insn is supported. */
460 if ((code
= can_extend_p (to_mode
, from_mode
, unsignedp
))
463 /* If FROM is a SUBREG, put it into a register. Do this
464 so that we always generate the same set of insns for
465 better cse'ing; if an intermediate assignment occurred,
466 we won't be doing the operation directly on the SUBREG. */
467 if (optimize
> 0 && GET_CODE (from
) == SUBREG
)
468 from
= force_reg (from_mode
, from
);
469 emit_unop_insn (code
, to
, from
, equiv_code
);
472 /* Next, try converting via full word. */
473 else if (GET_MODE_PRECISION (from_mode
) < BITS_PER_WORD
474 && ((code
= can_extend_p (to_mode
, word_mode
, unsignedp
))
475 != CODE_FOR_nothing
))
477 rtx word_to
= gen_reg_rtx (word_mode
);
480 if (reg_overlap_mentioned_p (to
, from
))
481 from
= force_reg (from_mode
, from
);
484 convert_move (word_to
, from
, unsignedp
);
485 emit_unop_insn (code
, to
, word_to
, equiv_code
);
489 /* No special multiword conversion insn; do it by hand. */
492 /* Since we will turn this into a no conflict block, we must ensure
493 the source does not overlap the target so force it into an isolated
494 register when maybe so. Likewise for any MEM input, since the
495 conversion sequence might require several references to it and we
496 must ensure we're getting the same value every time. */
498 if (MEM_P (from
) || reg_overlap_mentioned_p (to
, from
))
499 from
= force_reg (from_mode
, from
);
501 /* Get a copy of FROM widened to a word, if necessary. */
502 if (GET_MODE_PRECISION (from_mode
) < BITS_PER_WORD
)
503 lowpart_mode
= word_mode
;
505 lowpart_mode
= from_mode
;
507 lowfrom
= convert_to_mode (lowpart_mode
, from
, unsignedp
);
509 lowpart
= gen_lowpart (lowpart_mode
, to
);
510 emit_move_insn (lowpart
, lowfrom
);
512 /* Compute the value to put in each remaining word. */
514 fill_value
= const0_rtx
;
516 fill_value
= emit_store_flag_force (gen_reg_rtx (word_mode
),
517 LT
, lowfrom
, const0_rtx
,
518 lowpart_mode
, 0, -1);
520 /* Fill the remaining words. */
521 for (i
= GET_MODE_SIZE (lowpart_mode
) / UNITS_PER_WORD
; i
< nwords
; i
++)
523 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
524 rtx subword
= operand_subword (to
, index
, 1, to_mode
);
526 gcc_assert (subword
);
528 if (fill_value
!= subword
)
529 emit_move_insn (subword
, fill_value
);
532 insns
= get_insns ();
539 /* Truncating multi-word to a word or less. */
540 if (GET_MODE_PRECISION (from_mode
) > BITS_PER_WORD
541 && GET_MODE_PRECISION (to_mode
) <= BITS_PER_WORD
)
544 && ! MEM_VOLATILE_P (from
)
545 && direct_load
[(int) to_mode
]
546 && ! mode_dependent_address_p (XEXP (from
, 0),
547 MEM_ADDR_SPACE (from
)))
549 || GET_CODE (from
) == SUBREG
))
550 from
= force_reg (from_mode
, from
);
551 convert_move (to
, gen_lowpart (word_mode
, from
), 0);
555 /* Now follow all the conversions between integers
556 no more than a word long. */
558 /* For truncation, usually we can just refer to FROM in a narrower mode. */
559 if (GET_MODE_BITSIZE (to_mode
) < GET_MODE_BITSIZE (from_mode
)
560 && TRULY_NOOP_TRUNCATION_MODES_P (to_mode
, from_mode
))
563 && ! MEM_VOLATILE_P (from
)
564 && direct_load
[(int) to_mode
]
565 && ! mode_dependent_address_p (XEXP (from
, 0),
566 MEM_ADDR_SPACE (from
)))
568 || GET_CODE (from
) == SUBREG
))
569 from
= force_reg (from_mode
, from
);
570 if (REG_P (from
) && REGNO (from
) < FIRST_PSEUDO_REGISTER
571 && !targetm
.hard_regno_mode_ok (REGNO (from
), to_mode
))
572 from
= copy_to_reg (from
);
573 emit_move_insn (to
, gen_lowpart (to_mode
, from
));
577 /* Handle extension. */
578 if (GET_MODE_PRECISION (to_mode
) > GET_MODE_PRECISION (from_mode
))
580 /* Convert directly if that works. */
581 if ((code
= can_extend_p (to_mode
, from_mode
, unsignedp
))
584 emit_unop_insn (code
, to
, from
, equiv_code
);
592 /* Search for a mode to convert via. */
593 opt_scalar_mode intermediate_iter
;
594 FOR_EACH_MODE_FROM (intermediate_iter
, from_mode
)
596 scalar_mode intermediate
= intermediate_iter
.require ();
597 if (((can_extend_p (to_mode
, intermediate
, unsignedp
)
599 || (GET_MODE_SIZE (to_mode
) < GET_MODE_SIZE (intermediate
)
600 && TRULY_NOOP_TRUNCATION_MODES_P (to_mode
,
602 && (can_extend_p (intermediate
, from_mode
, unsignedp
)
603 != CODE_FOR_nothing
))
605 convert_move (to
, convert_to_mode (intermediate
, from
,
606 unsignedp
), unsignedp
);
611 /* No suitable intermediate mode.
612 Generate what we need with shifts. */
613 shift_amount
= (GET_MODE_PRECISION (to_mode
)
614 - GET_MODE_PRECISION (from_mode
));
615 from
= gen_lowpart (to_mode
, force_reg (from_mode
, from
));
616 tmp
= expand_shift (LSHIFT_EXPR
, to_mode
, from
, shift_amount
,
618 tmp
= expand_shift (RSHIFT_EXPR
, to_mode
, tmp
, shift_amount
,
621 emit_move_insn (to
, tmp
);
626 /* Support special truncate insns for certain modes. */
627 if (convert_optab_handler (trunc_optab
, to_mode
,
628 from_mode
) != CODE_FOR_nothing
)
630 emit_unop_insn (convert_optab_handler (trunc_optab
, to_mode
, from_mode
),
635 /* Handle truncation of volatile memrefs, and so on;
636 the things that couldn't be truncated directly,
637 and for which there was no special instruction.
639 ??? Code above formerly short-circuited this, for most integer
640 mode pairs, with a force_reg in from_mode followed by a recursive
641 call to this routine. Appears always to have been wrong. */
642 if (GET_MODE_PRECISION (to_mode
) < GET_MODE_PRECISION (from_mode
))
644 rtx temp
= force_reg (to_mode
, gen_lowpart (to_mode
, from
));
645 emit_move_insn (to
, temp
);
649 /* Mode combination is not recognized. */
653 /* Return an rtx for a value that would result
654 from converting X to mode MODE.
655 Both X and MODE may be floating, or both integer.
656 UNSIGNEDP is nonzero if X is an unsigned value.
657 This can be done by referring to a part of X in place
658 or by copying to a new temporary with conversion. */
661 convert_to_mode (machine_mode mode
, rtx x
, int unsignedp
)
/* Thin wrapper: passing VOIDmode as OLDMODE tells convert_modes to take
   the source mode from X itself, so X must have a nonvoid mode here.  */
663 return convert_modes (mode
, VOIDmode
, x
, unsignedp
);
666 /* Return an rtx for a value that would result
667 from converting X from mode OLDMODE to mode MODE.
668 Both modes may be floating, or both integer.
669 UNSIGNEDP is nonzero if X is an unsigned value.
671 This can be done by referring to a part of X in place
672 or by copying to a new temporary with conversion.
674 You can give VOIDmode for OLDMODE, if you are sure X has a nonvoid mode. */
677 convert_modes (machine_mode mode
, machine_mode oldmode
, rtx x
, int unsignedp
)
680 scalar_int_mode int_mode
;
682 /* If FROM is a SUBREG that indicates that we have already done at least
683 the required extension, strip it. */
685 if (GET_CODE (x
) == SUBREG
686 && SUBREG_PROMOTED_VAR_P (x
)
687 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
688 && (GET_MODE_PRECISION (subreg_promoted_mode (x
))
689 >= GET_MODE_PRECISION (int_mode
))
690 && SUBREG_CHECK_PROMOTED_SIGN (x
, unsignedp
))
691 x
= gen_lowpart (int_mode
, SUBREG_REG (x
));
693 if (GET_MODE (x
) != VOIDmode
)
694 oldmode
= GET_MODE (x
);
699 if (CONST_SCALAR_INT_P (x
)
700 && is_a
<scalar_int_mode
> (mode
, &int_mode
))
702 /* If the caller did not tell us the old mode, then there is not
703 much to do with respect to canonicalization. We have to
704 assume that all the bits are significant. */
705 if (!is_a
<scalar_int_mode
> (oldmode
))
706 oldmode
= MAX_MODE_INT
;
707 wide_int w
= wide_int::from (rtx_mode_t (x
, oldmode
),
708 GET_MODE_PRECISION (int_mode
),
709 unsignedp
? UNSIGNED
: SIGNED
);
710 return immed_wide_int_const (w
, int_mode
);
713 /* We can do this with a gen_lowpart if both desired and current modes
714 are integer, and this is either a constant integer, a register, or a
716 scalar_int_mode int_oldmode
;
717 if (is_int_mode (mode
, &int_mode
)
718 && is_int_mode (oldmode
, &int_oldmode
)
719 && GET_MODE_PRECISION (int_mode
) <= GET_MODE_PRECISION (int_oldmode
)
720 && ((MEM_P (x
) && !MEM_VOLATILE_P (x
) && direct_load
[(int) int_mode
])
721 || CONST_POLY_INT_P (x
)
723 && (!HARD_REGISTER_P (x
)
724 || targetm
.hard_regno_mode_ok (REGNO (x
), int_mode
))
725 && TRULY_NOOP_TRUNCATION_MODES_P (int_mode
, GET_MODE (x
)))))
726 return gen_lowpart (int_mode
, x
);
728 /* Converting from integer constant into mode is always equivalent to an
730 if (VECTOR_MODE_P (mode
) && GET_MODE (x
) == VOIDmode
)
732 gcc_assert (known_eq (GET_MODE_BITSIZE (mode
),
733 GET_MODE_BITSIZE (oldmode
)));
734 return simplify_gen_subreg (mode
, x
, oldmode
, 0);
737 temp
= gen_reg_rtx (mode
);
738 convert_move (temp
, x
, unsignedp
);
742 /* Return the largest alignment we can use for doing a move (or store)
743 of MAX_PIECES. ALIGN is the largest alignment we could use. */
746 alignment_for_piecewise_move (unsigned int max_pieces
, unsigned int align
)
/* First, cap ALIGN at the natural alignment of the integer mode that
   spans MAX_PIECES bytes.  */
748 scalar_int_mode tmode
749 = int_mode_for_size (max_pieces
* BITS_PER_UNIT
, 0).require ();
751 if (align
>= GET_MODE_ALIGNMENT (tmode
))
752 align
= GET_MODE_ALIGNMENT (tmode
);
/* Scan integer modes from narrowest upward for the widest mode XMODE
   that still fits in MAX_PIECES bytes and is not slow to access
   unaligned on this target; the result is at least XMODE's alignment.
   NOTE(review): this chunk is an extraction artifact -- the braces,
   the loop body's tail and the surrounding else-branch lines are
   missing; the code text is preserved verbatim.  */
755 scalar_int_mode xmode
= NARROWEST_INT_MODE
;
756 opt_scalar_int_mode mode_iter
;
757 FOR_EACH_MODE_IN_CLASS (mode_iter
, MODE_INT
)
759 tmode
= mode_iter
.require ();
760 if (GET_MODE_SIZE (tmode
) > max_pieces
761 || targetm
.slow_unaligned_access (tmode
, align
))
766 align
= MAX (align
, GET_MODE_ALIGNMENT (xmode
));
772 /* Return the widest QI vector, if QI_MODE is true, or integer mode
773 that is narrower than SIZE bytes. */
775 static fixed_size_mode
776 widest_fixed_size_mode_for_size (unsigned int size
, bool qi_vector
)
/* NOTE(review): extraction artifact -- braces, the vector-mode return
   statements and some guard lines are missing here; code text is
   preserved verbatim.  */
778 fixed_size_mode result
= NARROWEST_INT_MODE
;
780 gcc_checking_assert (size
> 1);
782 /* Use QI vector only if size is wider than a WORD. */
783 if (qi_vector
&& size
> UNITS_PER_WORD
)
/* Walk the QImode-element vector modes, remembering candidates the
   target can materialize (vec_duplicate supported), until one reaches
   SIZE bytes.  */
786 fixed_size_mode candidate
;
787 FOR_EACH_MODE_IN_CLASS (mode
, MODE_VECTOR_INT
)
788 if (is_a
<fixed_size_mode
> (mode
, &candidate
)
789 && GET_MODE_INNER (candidate
) == QImode
)
791 if (GET_MODE_SIZE (candidate
) >= size
)
793 if (optab_handler (vec_duplicate_optab
, candidate
)
798 if (result
!= NARROWEST_INT_MODE
)
/* Fallback: the widest scalar integer mode strictly narrower than
   SIZE bytes.  */
802 opt_scalar_int_mode tmode
;
803 FOR_EACH_MODE_IN_CLASS (tmode
, MODE_INT
)
804 if (GET_MODE_SIZE (tmode
.require ()) < size
)
805 result
= tmode
.require ();
810 /* Determine whether an operation OP on LEN bytes with alignment ALIGN can
811 and should be performed piecewise. */
814 can_do_by_pieces (unsigned HOST_WIDE_INT len
, unsigned int align
,
815 enum by_pieces_operation op
)
/* The decision is delegated entirely to the target hook, which also
   receives whether the current insn is being optimized for speed.  */
817 return targetm
.use_by_pieces_infrastructure_p (len
, align
, op
,
818 optimize_insn_for_speed_p ());
821 /* Determine whether the LEN bytes can be moved by using several move
822 instructions. Return nonzero if a call to move_by_pieces should
826 can_move_by_pieces (unsigned HOST_WIDE_INT len
, unsigned int align
)
/* Convenience wrapper around can_do_by_pieces for the MOVE_BY_PIECES
   operation.  */
828 return can_do_by_pieces (len
, align
, MOVE_BY_PIECES
);
831 /* Return number of insns required to perform operation OP by pieces
832 for L bytes. ALIGN (in bits) is maximum alignment we can assume. */
834 unsigned HOST_WIDE_INT
835 by_pieces_ninsns (unsigned HOST_WIDE_INT l
, unsigned int align
,
836 unsigned int max_size
, by_pieces_operation op
)
838 unsigned HOST_WIDE_INT n_insns
= 0;
839 fixed_size_mode mode
;
841 if (targetm
.overlap_op_by_pieces_p () && op
!= COMPARE_BY_PIECES
)
843 /* NB: Round up L and ALIGN to the widest integer mode for
845 mode
= widest_fixed_size_mode_for_size (max_size
,
846 op
== SET_BY_PIECES
);
847 if (optab_handler (mov_optab
, mode
) != CODE_FOR_nothing
)
849 unsigned HOST_WIDE_INT up
= ROUND_UP (l
, GET_MODE_SIZE (mode
));
852 align
= GET_MODE_ALIGNMENT (mode
);
856 align
= alignment_for_piecewise_move (MOVE_MAX_PIECES
, align
);
858 while (max_size
> 1 && l
> 0)
860 mode
= widest_fixed_size_mode_for_size (max_size
,
861 op
== SET_BY_PIECES
);
862 enum insn_code icode
;
864 unsigned int modesize
= GET_MODE_SIZE (mode
);
866 icode
= optab_handler (mov_optab
, mode
);
867 if (icode
!= CODE_FOR_nothing
&& align
>= GET_MODE_ALIGNMENT (mode
))
869 unsigned HOST_WIDE_INT n_pieces
= l
/ modesize
;
877 case COMPARE_BY_PIECES
:
878 int batch
= targetm
.compare_by_pieces_branch_ratio (mode
);
879 int batch_ops
= 4 * batch
- 1;
880 unsigned HOST_WIDE_INT full
= n_pieces
/ batch
;
881 n_insns
+= full
* batch_ops
;
882 if (n_pieces
% batch
!= 0)
895 /* Used when performing piecewise block operations, holds information
896 about one of the memory objects involved. The member functions
897 can be used to generate code for loading from the object and
898 updating the address when iterating. */
902 /* The object being referenced, a MEM. Can be NULL_RTX to indicate
905 /* The address of the object. Can differ from that seen in the
906 MEM rtx if we copied the address to a register. */
908 /* Nonzero if the address on the object has an autoincrement already,
909 signifies whether that was an increment or decrement. */
910 signed char m_addr_inc
;
911 /* Nonzero if we intend to use autoinc without the address already
912 having autoinc form. We will insert add insns around each memory
913 reference, expecting later passes to form autoinc addressing modes.
914 The only supported options are predecrement and postincrement. */
915 signed char m_explicit_inc
;
916 /* True if we have either of the two possible cases of using
919 /* True if this is an address to be used for load operations rather
923 /* Optionally, a function to obtain constants for any given offset into
924 the objects, and data associated with it. */
925 by_pieces_constfn m_constfn
;
928 pieces_addr (rtx
, bool, by_pieces_constfn
, void *);
929 rtx
adjust (fixed_size_mode
, HOST_WIDE_INT
, by_pieces_prev
* = nullptr);
930 void increment_address (HOST_WIDE_INT
);
931 void maybe_predec (HOST_WIDE_INT
);
932 void maybe_postinc (HOST_WIDE_INT
);
933 void decide_autoinc (machine_mode
, bool, HOST_WIDE_INT
);
940 /* Initialize a pieces_addr structure from an object OBJ. IS_LOAD is
941 true if the operation to be performed on this object is a load
942 rather than a store. For stores, OBJ can be NULL, in which case we
943 assume the operation is a stack push. For loads, the optional
944 CONSTFN and its associated CFNDATA can be used in place of the
947 pieces_addr::pieces_addr (rtx obj
, bool is_load
, by_pieces_constfn constfn
,
949 : m_obj (obj
), m_is_load (is_load
), m_constfn (constfn
), m_cfndata (cfndata
)
955 rtx addr
= XEXP (obj
, 0);
956 rtx_code code
= GET_CODE (addr
);
958 bool dec
= code
== PRE_DEC
|| code
== POST_DEC
;
959 bool inc
= code
== PRE_INC
|| code
== POST_INC
;
962 m_addr_inc
= dec
? -1 : 1;
964 /* While we have always looked for these codes here, the code
965 implementing the memory operation has never handled them.
966 Support could be added later if necessary or beneficial. */
967 gcc_assert (code
!= PRE_INC
&& code
!= POST_DEC
);
975 if (STACK_GROWS_DOWNWARD
)
981 gcc_assert (constfn
!= NULL
);
985 gcc_assert (is_load
);
988 /* Decide whether to use autoinc for an address involved in a memory op.
989 MODE is the mode of the accesses, REVERSE is true if we've decided to
990 perform the operation starting from the end, and LEN is the length of
991 the operation. Don't override an earlier decision to set m_auto. */
994 pieces_addr::decide_autoinc (machine_mode
ARG_UNUSED (mode
), bool reverse
,
997 if (m_auto
|| m_obj
== NULL_RTX
)
1000 bool use_predec
= (m_is_load
1001 ? USE_LOAD_PRE_DECREMENT (mode
)
1002 : USE_STORE_PRE_DECREMENT (mode
));
1003 bool use_postinc
= (m_is_load
1004 ? USE_LOAD_POST_INCREMENT (mode
)
1005 : USE_STORE_POST_INCREMENT (mode
));
1006 machine_mode addr_mode
= get_address_mode (m_obj
);
1008 if (use_predec
&& reverse
)
1010 m_addr
= copy_to_mode_reg (addr_mode
,
1011 plus_constant (addr_mode
,
1014 m_explicit_inc
= -1;
1016 else if (use_postinc
&& !reverse
)
1018 m_addr
= copy_to_mode_reg (addr_mode
, m_addr
);
1022 else if (CONSTANT_P (m_addr
))
1023 m_addr
= copy_to_mode_reg (addr_mode
, m_addr
);
1026 /* Adjust the address to refer to the data at OFFSET in MODE. If we
1027 are using autoincrement for this address, we don't add the offset,
1028 but we still modify the MEM's properties. */
1031 pieces_addr::adjust (fixed_size_mode mode
, HOST_WIDE_INT offset
,
1032 by_pieces_prev
*prev
)
/* NOTE(review): the guard conditions for the first two returns (the
   m_constfn check and the autoincrement check) are missing from this
   extraction; code text kept verbatim.  */
1035 /* Pass the previous data to m_constfn. */
1036 return m_constfn (m_cfndata
, prev
, offset
, mode
);
1037 if (m_obj
== NULL_RTX
)
/* Autoincrement case: per the header comment, do not fold OFFSET into
   the address, but still update the MEM's recorded properties.  */
1040 return adjust_automodify_address (m_obj
, mode
, m_addr
, offset
);
/* Plain case: derive a new MEM referring to OFFSET within m_obj.  */
1042 return adjust_address (m_obj
, mode
, offset
);
1045 /* Emit an add instruction to increment the address by SIZE. */
1048 pieces_addr::increment_address (HOST_WIDE_INT size
)
/* Materialize SIZE as a constant in the address register's mode, then
   emit m_addr := m_addr + SIZE.  SIZE carries its own sign, so this
   also serves for decrements (see maybe_predec).  */
1050 rtx amount
= gen_int_mode (size
, GET_MODE (m_addr
));
1051 emit_insn (gen_add2_insn (m_addr
, amount
));
1054 /* If we are supposed to decrement the address after each access, emit code
1055 to do so now. Increment by SIZE (which should have the correct sign
1059 pieces_addr::maybe_predec (HOST_WIDE_INT size
)
/* m_explicit_inc < 0 means explicit pre-decrement addressing was chosen
   (see decide_autoinc); for any other value there is nothing to emit.  */
1061 if (m_explicit_inc
>= 0)
1063 gcc_assert (HAVE_PRE_DECREMENT
);
1064 increment_address (size
);
1067 /* If we are supposed to increment the address after each access, emit code
1068 to do so now. Increment by SIZE. */
1071 pieces_addr::maybe_postinc (HOST_WIDE_INT size
)
/* m_explicit_inc > 0 means explicit post-increment addressing was chosen
   (see decide_autoinc); for any other value there is nothing to emit.  */
1073 if (m_explicit_inc
<= 0)
1075 gcc_assert (HAVE_POST_INCREMENT
);
1076 increment_address (size
);
1079 /* This structure is used by do_op_by_pieces to describe the operation
1082 class op_by_pieces_d
1085 fixed_size_mode
get_usable_mode (fixed_size_mode
, unsigned int);
1086 fixed_size_mode
smallest_fixed_size_mode_for_size (unsigned int);
1089 pieces_addr m_to
, m_from
;
1090 /* Make m_len read-only so that smallest_fixed_size_mode_for_size can
1091 use it to check the valid mode size. */
1092 const unsigned HOST_WIDE_INT m_len
;
1093 HOST_WIDE_INT m_offset
;
1094 unsigned int m_align
;
1095 unsigned int m_max_size
;
1097 /* True if this is a stack push. */
1099 /* True if targetm.overlap_op_by_pieces_p () returns true. */
1100 bool m_overlap_op_by_pieces
;
1101 /* True if QI vector mode can be used. */
1102 bool m_qi_vector_mode
;
1104 /* Virtual functions, overriden by derived classes for the specific
1106 virtual void generate (rtx
, rtx
, machine_mode
) = 0;
1107 virtual bool prepare_mode (machine_mode
, unsigned int) = 0;
1108 virtual void finish_mode (machine_mode
)
1113 op_by_pieces_d (unsigned int, rtx
, bool, rtx
, bool, by_pieces_constfn
,
1114 void *, unsigned HOST_WIDE_INT
, unsigned int, bool,
1119 /* The constructor for an op_by_pieces_d structure. We require two
1120 objects named TO and FROM, which are identified as loads or stores
1121 by TO_LOAD and FROM_LOAD. If FROM is a load, the optional FROM_CFN
1122 and its associated FROM_CFN_DATA can be used to replace loads with
1123 constant values. MAX_PIECES describes the maximum number of bytes
1124 at a time which can be moved efficiently. LEN describes the length
1125 of the operation. */
1127 op_by_pieces_d::op_by_pieces_d (unsigned int max_pieces
, rtx to
,
1128 bool to_load
, rtx from
, bool from_load
,
1129 by_pieces_constfn from_cfn
,
1130 void *from_cfn_data
,
1131 unsigned HOST_WIDE_INT len
,
1132 unsigned int align
, bool push
,
1133 bool qi_vector_mode
)
1134 : m_to (to
, to_load
, NULL
, NULL
),
1135 m_from (from
, from_load
, from_cfn
, from_cfn_data
),
1136 m_len (len
), m_max_size (max_pieces
+ 1),
1137 m_push (push
), m_qi_vector_mode (qi_vector_mode
)
1139 int toi
= m_to
.get_addr_inc ();
1140 int fromi
= m_from
.get_addr_inc ();
1141 if (toi
>= 0 && fromi
>= 0)
1143 else if (toi
<= 0 && fromi
<= 0)
1148 m_offset
= m_reverse
? len
: 0;
1149 align
= MIN (to
? MEM_ALIGN (to
) : align
,
1150 from
? MEM_ALIGN (from
) : align
);
1152 /* If copying requires more than two move insns,
1153 copy addresses to registers (to make displacements shorter)
1154 and use post-increment if available. */
1155 if (by_pieces_ninsns (len
, align
, m_max_size
, MOVE_BY_PIECES
) > 2)
1157 /* Find the mode of the largest comparison. */
1158 fixed_size_mode mode
1159 = widest_fixed_size_mode_for_size (m_max_size
,
1162 m_from
.decide_autoinc (mode
, m_reverse
, len
);
1163 m_to
.decide_autoinc (mode
, m_reverse
, len
);
1166 align
= alignment_for_piecewise_move (MOVE_MAX_PIECES
, align
);
1169 m_overlap_op_by_pieces
= targetm
.overlap_op_by_pieces_p ();
1172 /* This function returns the largest usable integer mode for LEN bytes
1173 whose size is no bigger than size of MODE. */
1176 op_by_pieces_d::get_usable_mode (fixed_size_mode mode
, unsigned int len
)
1181 size
= GET_MODE_SIZE (mode
);
1182 if (len
>= size
&& prepare_mode (mode
, m_align
))
1184 /* widest_fixed_size_mode_for_size checks SIZE > 1. */
1185 mode
= widest_fixed_size_mode_for_size (size
, m_qi_vector_mode
);
1191 /* Return the smallest integer or QI vector mode that is not narrower
1195 op_by_pieces_d::smallest_fixed_size_mode_for_size (unsigned int size
)
1197 /* Use QI vector only for > size of WORD. */
1198 if (m_qi_vector_mode
&& size
> UNITS_PER_WORD
)
1201 fixed_size_mode candidate
;
1202 FOR_EACH_MODE_IN_CLASS (mode
, MODE_VECTOR_INT
)
1203 if (is_a
<fixed_size_mode
> (mode
, &candidate
)
1204 && GET_MODE_INNER (candidate
) == QImode
)
1206 /* Don't return a mode wider than M_LEN. */
1207 if (GET_MODE_SIZE (candidate
) > m_len
)
1210 if (GET_MODE_SIZE (candidate
) >= size
1211 && (optab_handler (vec_duplicate_optab
, candidate
)
1212 != CODE_FOR_nothing
))
1217 return smallest_int_mode_for_size (size
* BITS_PER_UNIT
);
1220 /* This function contains the main loop used for expanding a block
1221 operation. First move what we can in the largest integer mode,
1222 then go to successively smaller modes. For every access, call
1223 GENFUN with the two operands and the EXTRA_DATA. */
1226 op_by_pieces_d::run ()
1231 unsigned HOST_WIDE_INT length
= m_len
;
1233 /* widest_fixed_size_mode_for_size checks M_MAX_SIZE > 1. */
1234 fixed_size_mode mode
1235 = widest_fixed_size_mode_for_size (m_max_size
, m_qi_vector_mode
);
1236 mode
= get_usable_mode (mode
, length
);
1238 by_pieces_prev to_prev
= { nullptr, mode
};
1239 by_pieces_prev from_prev
= { nullptr, mode
};
1243 unsigned int size
= GET_MODE_SIZE (mode
);
1244 rtx to1
= NULL_RTX
, from1
;
1246 while (length
>= size
)
1251 to1
= m_to
.adjust (mode
, m_offset
, &to_prev
);
1253 to_prev
.mode
= mode
;
1254 from1
= m_from
.adjust (mode
, m_offset
, &from_prev
);
1255 from_prev
.data
= from1
;
1256 from_prev
.mode
= mode
;
1258 m_to
.maybe_predec (-(HOST_WIDE_INT
)size
);
1259 m_from
.maybe_predec (-(HOST_WIDE_INT
)size
);
1261 generate (to1
, from1
, mode
);
1263 m_to
.maybe_postinc (size
);
1264 m_from
.maybe_postinc (size
);
1277 if (!m_push
&& m_overlap_op_by_pieces
)
1279 /* NB: Generate overlapping operations if it is not a stack
1280 push since stack push must not overlap. Get the smallest
1281 fixed size mode for M_LEN bytes. */
1282 mode
= smallest_fixed_size_mode_for_size (length
);
1283 mode
= get_usable_mode (mode
, GET_MODE_SIZE (mode
));
1284 int gap
= GET_MODE_SIZE (mode
) - length
;
1287 /* If size of MODE > M_LEN, generate the last operation
1288 in MODE for the remaining bytes with ovelapping memory
1289 from the previois operation. */
1299 /* widest_fixed_size_mode_for_size checks SIZE > 1. */
1300 mode
= widest_fixed_size_mode_for_size (size
,
1302 mode
= get_usable_mode (mode
, length
);
1307 /* The code above should have handled everything. */
1308 gcc_assert (!length
);
1311 /* Derived class from op_by_pieces_d, providing support for block move
1314 #ifdef PUSH_ROUNDING
1315 #define PUSHG_P(to) ((to) == nullptr)
1317 #define PUSHG_P(to) false
1320 class move_by_pieces_d
: public op_by_pieces_d
1322 insn_gen_fn m_gen_fun
;
1323 void generate (rtx
, rtx
, machine_mode
);
1324 bool prepare_mode (machine_mode
, unsigned int);
1327 move_by_pieces_d (rtx to
, rtx from
, unsigned HOST_WIDE_INT len
,
1329 : op_by_pieces_d (MOVE_MAX_PIECES
, to
, false, from
, true, NULL
,
1330 NULL
, len
, align
, PUSHG_P (to
))
1333 rtx
finish_retmode (memop_ret
);
1336 /* Return true if MODE can be used for a set of copies, given an
1337 alignment ALIGN. Prepare whatever data is necessary for later
1338 calls to generate. */
1341 move_by_pieces_d::prepare_mode (machine_mode mode
, unsigned int align
)
1343 insn_code icode
= optab_handler (mov_optab
, mode
);
1344 m_gen_fun
= GEN_FCN (icode
);
1345 return icode
!= CODE_FOR_nothing
&& align
>= GET_MODE_ALIGNMENT (mode
);
1348 /* A callback used when iterating for a compare_by_pieces_operation.
1349 OP0 and OP1 are the values that have been loaded and should be
1350 compared in MODE. If OP0 is NULL, this means we should generate a
1351 push; otherwise EXTRA_DATA holds a pointer to a pointer to the insn
1352 gen function that should be used to generate the mode. */
1355 move_by_pieces_d::generate (rtx op0
, rtx op1
,
1356 machine_mode mode ATTRIBUTE_UNUSED
)
1358 #ifdef PUSH_ROUNDING
1359 if (op0
== NULL_RTX
)
1361 emit_single_push_insn (mode
, op1
, NULL
);
1365 emit_insn (m_gen_fun (op0
, op1
));
1368 /* Perform the final adjustment at the end of a string to obtain the
1369 correct return value for the block operation.
1370 Return value is based on RETMODE argument. */
1373 move_by_pieces_d::finish_retmode (memop_ret retmode
)
1375 gcc_assert (!m_reverse
);
1376 if (retmode
== RETURN_END_MINUS_ONE
)
1378 m_to
.maybe_postinc (-1);
1381 return m_to
.adjust (QImode
, m_offset
);
1384 /* Generate several move instructions to copy LEN bytes from block FROM to
1385 block TO. (These are MEM rtx's with BLKmode).
1387 If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is
1388 used to push FROM to the stack.
1390 ALIGN is maximum stack alignment we can assume.
1392 Return value is based on RETMODE argument. */
1395 move_by_pieces (rtx to
, rtx from
, unsigned HOST_WIDE_INT len
,
1396 unsigned int align
, memop_ret retmode
)
1398 #ifndef PUSH_ROUNDING
1403 move_by_pieces_d
data (to
, from
, len
, align
);
1407 if (retmode
!= RETURN_BEGIN
)
1408 return data
.finish_retmode (retmode
);
1413 /* Derived class from op_by_pieces_d, providing support for block move
1416 class store_by_pieces_d
: public op_by_pieces_d
1418 insn_gen_fn m_gen_fun
;
1419 void generate (rtx
, rtx
, machine_mode
);
1420 bool prepare_mode (machine_mode
, unsigned int);
1423 store_by_pieces_d (rtx to
, by_pieces_constfn cfn
, void *cfn_data
,
1424 unsigned HOST_WIDE_INT len
, unsigned int align
,
1425 bool qi_vector_mode
)
1426 : op_by_pieces_d (STORE_MAX_PIECES
, to
, false, NULL_RTX
, true, cfn
,
1427 cfn_data
, len
, align
, false, qi_vector_mode
)
1430 rtx
finish_retmode (memop_ret
);
1433 /* Return true if MODE can be used for a set of stores, given an
1434 alignment ALIGN. Prepare whatever data is necessary for later
1435 calls to generate. */
1438 store_by_pieces_d::prepare_mode (machine_mode mode
, unsigned int align
)
1440 insn_code icode
= optab_handler (mov_optab
, mode
);
1441 m_gen_fun
= GEN_FCN (icode
);
1442 return icode
!= CODE_FOR_nothing
&& align
>= GET_MODE_ALIGNMENT (mode
);
1445 /* A callback used when iterating for a store_by_pieces_operation.
1446 OP0 and OP1 are the values that have been loaded and should be
1447 compared in MODE. If OP0 is NULL, this means we should generate a
1448 push; otherwise EXTRA_DATA holds a pointer to a pointer to the insn
1449 gen function that should be used to generate the mode. */
1452 store_by_pieces_d::generate (rtx op0
, rtx op1
, machine_mode
)
1454 emit_insn (m_gen_fun (op0
, op1
));
1457 /* Perform the final adjustment at the end of a string to obtain the
1458 correct return value for the block operation.
1459 Return value is based on RETMODE argument. */
1462 store_by_pieces_d::finish_retmode (memop_ret retmode
)
1464 gcc_assert (!m_reverse
);
1465 if (retmode
== RETURN_END_MINUS_ONE
)
1467 m_to
.maybe_postinc (-1);
1470 return m_to
.adjust (QImode
, m_offset
);
1473 /* Determine whether the LEN bytes generated by CONSTFUN can be
1474 stored to memory using several move instructions. CONSTFUNDATA is
1475 a pointer which will be passed as argument in every CONSTFUN call.
1476 ALIGN is maximum alignment we can assume. MEMSETP is true if this is
1477 a memset operation and false if it's a copy of a constant string.
1478 Return nonzero if a call to store_by_pieces should succeed. */
1481 can_store_by_pieces (unsigned HOST_WIDE_INT len
,
1482 by_pieces_constfn constfun
,
1483 void *constfundata
, unsigned int align
, bool memsetp
)
1485 unsigned HOST_WIDE_INT l
;
1486 unsigned int max_size
;
1487 HOST_WIDE_INT offset
= 0;
1488 enum insn_code icode
;
1490 /* cst is set but not used if LEGITIMATE_CONSTANT doesn't use it. */
1491 rtx cst ATTRIBUTE_UNUSED
;
1496 if (!targetm
.use_by_pieces_infrastructure_p (len
, align
,
1500 optimize_insn_for_speed_p ()))
1503 align
= alignment_for_piecewise_move (STORE_MAX_PIECES
, align
);
1505 /* We would first store what we can in the largest integer mode, then go to
1506 successively smaller modes. */
1509 reverse
<= (HAVE_PRE_DECREMENT
|| HAVE_POST_DECREMENT
);
1513 max_size
= STORE_MAX_PIECES
+ 1;
1514 while (max_size
> 1 && l
> 0)
1516 fixed_size_mode mode
1517 = widest_fixed_size_mode_for_size (max_size
, memsetp
);
1519 icode
= optab_handler (mov_optab
, mode
);
1520 if (icode
!= CODE_FOR_nothing
1521 && align
>= GET_MODE_ALIGNMENT (mode
))
1523 unsigned int size
= GET_MODE_SIZE (mode
);
1530 cst
= (*constfun
) (constfundata
, nullptr, offset
, mode
);
1531 /* All CONST_VECTORs can be loaded for memset since
1532 vec_duplicate_optab is a precondition to pick a
1533 vector mode for the memset expander. */
1534 if (!((memsetp
&& VECTOR_MODE_P (mode
))
1535 || targetm
.legitimate_constant_p (mode
, cst
)))
1545 max_size
= GET_MODE_SIZE (mode
);
1548 /* The code above should have handled everything. */
1555 /* Generate several move instructions to store LEN bytes generated by
1556 CONSTFUN to block TO. (A MEM rtx with BLKmode). CONSTFUNDATA is a
1557 pointer which will be passed as argument in every CONSTFUN call.
1558 ALIGN is maximum alignment we can assume. MEMSETP is true if this is
1559 a memset operation and false if it's a copy of a constant string.
1560 Return value is based on RETMODE argument. */
1563 store_by_pieces (rtx to
, unsigned HOST_WIDE_INT len
,
1564 by_pieces_constfn constfun
,
1565 void *constfundata
, unsigned int align
, bool memsetp
,
1570 gcc_assert (retmode
!= RETURN_END_MINUS_ONE
);
1574 gcc_assert (targetm
.use_by_pieces_infrastructure_p
1576 memsetp
? SET_BY_PIECES
: STORE_BY_PIECES
,
1577 optimize_insn_for_speed_p ()));
1579 store_by_pieces_d
data (to
, constfun
, constfundata
, len
, align
,
1583 if (retmode
!= RETURN_BEGIN
)
1584 return data
.finish_retmode (retmode
);
1589 /* Generate several move instructions to clear LEN bytes of block TO. (A MEM
1590 rtx with BLKmode). ALIGN is maximum alignment we can assume. */
1593 clear_by_pieces (rtx to
, unsigned HOST_WIDE_INT len
, unsigned int align
)
1598 /* Use builtin_memset_read_str to support vector mode broadcast. */
1600 store_by_pieces_d
data (to
, builtin_memset_read_str
, &c
, len
, align
,
1605 /* Context used by compare_by_pieces_genfn. It stores the fail label
1606 to jump to in case of miscomparison, and for branch ratios greater than 1,
1607 it stores an accumulator and the current and maximum counts before
1608 emitting another branch. */
1610 class compare_by_pieces_d
: public op_by_pieces_d
1612 rtx_code_label
*m_fail_label
;
1614 int m_count
, m_batch
;
1616 void generate (rtx
, rtx
, machine_mode
);
1617 bool prepare_mode (machine_mode
, unsigned int);
1618 void finish_mode (machine_mode
);
1620 compare_by_pieces_d (rtx op0
, rtx op1
, by_pieces_constfn op1_cfn
,
1621 void *op1_cfn_data
, HOST_WIDE_INT len
, int align
,
1622 rtx_code_label
*fail_label
)
1623 : op_by_pieces_d (COMPARE_MAX_PIECES
, op0
, true, op1
, true, op1_cfn
,
1624 op1_cfn_data
, len
, align
, false)
1626 m_fail_label
= fail_label
;
1630 /* A callback used when iterating for a compare_by_pieces_operation.
1631 OP0 and OP1 are the values that have been loaded and should be
1632 compared in MODE. DATA holds a pointer to the compare_by_pieces_data
1633 context structure. */
1636 compare_by_pieces_d::generate (rtx op0
, rtx op1
, machine_mode mode
)
1640 rtx temp
= expand_binop (mode
, sub_optab
, op0
, op1
, NULL_RTX
,
1641 true, OPTAB_LIB_WIDEN
);
1643 temp
= expand_binop (mode
, ior_optab
, m_accumulator
, temp
, temp
,
1644 true, OPTAB_LIB_WIDEN
);
1645 m_accumulator
= temp
;
1647 if (++m_count
< m_batch
)
1651 op0
= m_accumulator
;
1653 m_accumulator
= NULL_RTX
;
1655 do_compare_rtx_and_jump (op0
, op1
, NE
, true, mode
, NULL_RTX
, NULL
,
1656 m_fail_label
, profile_probability::uninitialized ());
1659 /* Return true if MODE can be used for a set of moves and comparisons,
1660 given an alignment ALIGN. Prepare whatever data is necessary for
1661 later calls to generate. */
1664 compare_by_pieces_d::prepare_mode (machine_mode mode
, unsigned int align
)
1666 insn_code icode
= optab_handler (mov_optab
, mode
);
1667 if (icode
== CODE_FOR_nothing
1668 || align
< GET_MODE_ALIGNMENT (mode
)
1669 || !can_compare_p (EQ
, mode
, ccp_jump
))
1671 m_batch
= targetm
.compare_by_pieces_branch_ratio (mode
);
1674 m_accumulator
= NULL_RTX
;
1679 /* Called after expanding a series of comparisons in MODE. If we have
1680 accumulated results for which we haven't emitted a branch yet, do
1684 compare_by_pieces_d::finish_mode (machine_mode mode
)
1686 if (m_accumulator
!= NULL_RTX
)
1687 do_compare_rtx_and_jump (m_accumulator
, const0_rtx
, NE
, true, mode
,
1688 NULL_RTX
, NULL
, m_fail_label
,
1689 profile_probability::uninitialized ());
1692 /* Generate several move instructions to compare LEN bytes from blocks
1693 ARG0 and ARG1. (These are MEM rtx's with BLKmode).
1695 If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is
1696 used to push FROM to the stack.
1698 ALIGN is maximum stack alignment we can assume.
1700 Optionally, the caller can pass a constfn and associated data in A1_CFN
1701 and A1_CFN_DATA. describing that the second operand being compared is a
1702 known constant and how to obtain its data. */
1705 compare_by_pieces (rtx arg0
, rtx arg1
, unsigned HOST_WIDE_INT len
,
1706 rtx target
, unsigned int align
,
1707 by_pieces_constfn a1_cfn
, void *a1_cfn_data
)
1709 rtx_code_label
*fail_label
= gen_label_rtx ();
1710 rtx_code_label
*end_label
= gen_label_rtx ();
1712 if (target
== NULL_RTX
1713 || !REG_P (target
) || REGNO (target
) < FIRST_PSEUDO_REGISTER
)
1714 target
= gen_reg_rtx (TYPE_MODE (integer_type_node
));
1716 compare_by_pieces_d
data (arg0
, arg1
, a1_cfn
, a1_cfn_data
, len
, align
,
1721 emit_move_insn (target
, const0_rtx
);
1722 emit_jump (end_label
);
1724 emit_label (fail_label
);
1725 emit_move_insn (target
, const1_rtx
);
1726 emit_label (end_label
);
1731 /* Emit code to move a block Y to a block X. This may be done with
1732 string-move instructions, with multiple scalar move instructions,
1733 or with a library call.
1735 Both X and Y must be MEM rtx's (perhaps inside VOLATILE) with mode BLKmode.
1736 SIZE is an rtx that says how long they are.
1737 ALIGN is the maximum alignment we can assume they have.
1738 METHOD describes what kind of copy this is, and what mechanisms may be used.
1739 MIN_SIZE is the minimal size of block to move
1740 MAX_SIZE is the maximal size of block to move, if it cannot be represented
1741 in unsigned HOST_WIDE_INT, than it is mask of all ones.
1743 Return the address of the new block, if memcpy is called and returns it,
1747 emit_block_move_hints (rtx x
, rtx y
, rtx size
, enum block_op_methods method
,
1748 unsigned int expected_align
, HOST_WIDE_INT expected_size
,
1749 unsigned HOST_WIDE_INT min_size
,
1750 unsigned HOST_WIDE_INT max_size
,
1751 unsigned HOST_WIDE_INT probable_max_size
,
1752 bool bail_out_libcall
, bool *is_move_done
,
1760 *is_move_done
= true;
1763 if (CONST_INT_P (size
) && INTVAL (size
) == 0)
1768 case BLOCK_OP_NORMAL
:
1769 case BLOCK_OP_TAILCALL
:
1773 case BLOCK_OP_CALL_PARM
:
1774 may_use_call
= block_move_libcall_safe_for_call_parm ();
1776 /* Make inhibit_defer_pop nonzero around the library call
1777 to force it to pop the arguments right away. */
1781 case BLOCK_OP_NO_LIBCALL
:
1785 case BLOCK_OP_NO_LIBCALL_RET
:
1793 gcc_assert (MEM_P (x
) && MEM_P (y
));
1794 align
= MIN (MEM_ALIGN (x
), MEM_ALIGN (y
));
1795 gcc_assert (align
>= BITS_PER_UNIT
);
1797 /* Make sure we've got BLKmode addresses; store_one_arg can decide that
1798 block copy is more efficient for other large modes, e.g. DCmode. */
1799 x
= adjust_address (x
, BLKmode
, 0);
1800 y
= adjust_address (y
, BLKmode
, 0);
1802 /* If source and destination are the same, no need to copy anything. */
1803 if (rtx_equal_p (x
, y
)
1804 && !MEM_VOLATILE_P (x
)
1805 && !MEM_VOLATILE_P (y
))
1808 /* Set MEM_SIZE as appropriate for this block copy. The main place this
1809 can be incorrect is coming from __builtin_memcpy. */
1810 poly_int64 const_size
;
1811 if (poly_int_rtx_p (size
, &const_size
))
1813 x
= shallow_copy_rtx (x
);
1814 y
= shallow_copy_rtx (y
);
1815 set_mem_size (x
, const_size
);
1816 set_mem_size (y
, const_size
);
1819 bool pieces_ok
= CONST_INT_P (size
)
1820 && can_move_by_pieces (INTVAL (size
), align
);
1821 bool pattern_ok
= false;
1823 if (!pieces_ok
|| might_overlap
)
1826 = emit_block_move_via_pattern (x
, y
, size
, align
,
1827 expected_align
, expected_size
,
1828 min_size
, max_size
, probable_max_size
,
1830 if (!pattern_ok
&& might_overlap
)
1832 /* Do not try any of the other methods below as they are not safe
1833 for overlapping moves. */
1834 *is_move_done
= false;
1842 move_by_pieces (x
, y
, INTVAL (size
), align
, RETURN_BEGIN
);
1843 else if (may_use_call
&& !might_overlap
1844 && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x
))
1845 && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (y
)))
1847 if (bail_out_libcall
)
1850 *is_move_done
= false;
1854 if (may_use_call
< 0)
1857 retval
= emit_block_copy_via_libcall (x
, y
, size
,
1858 method
== BLOCK_OP_TAILCALL
);
1860 else if (might_overlap
)
1861 *is_move_done
= false;
1863 emit_block_move_via_loop (x
, y
, size
, align
);
1865 if (method
== BLOCK_OP_CALL_PARM
)
1872 emit_block_move (rtx x
, rtx y
, rtx size
, enum block_op_methods method
)
1874 unsigned HOST_WIDE_INT max
, min
= 0;
1875 if (GET_CODE (size
) == CONST_INT
)
1876 min
= max
= UINTVAL (size
);
1878 max
= GET_MODE_MASK (GET_MODE (size
));
1879 return emit_block_move_hints (x
, y
, size
, method
, 0, -1,
1883 /* A subroutine of emit_block_move. Returns true if calling the
1884 block move libcall will not clobber any parameters which may have
1885 already been placed on the stack. */
1888 block_move_libcall_safe_for_call_parm (void)
1892 /* If arguments are pushed on the stack, then they're safe. */
1893 if (targetm
.calls
.push_argument (0))
1896 /* If registers go on the stack anyway, any argument is sure to clobber
1897 an outgoing argument. */
1898 #if defined (REG_PARM_STACK_SPACE)
1899 fn
= builtin_decl_implicit (BUILT_IN_MEMCPY
);
1900 /* Avoid set but not used warning if *REG_PARM_STACK_SPACE doesn't
1901 depend on its argument. */
1903 if (OUTGOING_REG_PARM_STACK_SPACE ((!fn
? NULL_TREE
: TREE_TYPE (fn
)))
1904 && REG_PARM_STACK_SPACE (fn
) != 0)
1908 /* If any argument goes in memory, then it might clobber an outgoing
1911 CUMULATIVE_ARGS args_so_far_v
;
1912 cumulative_args_t args_so_far
;
1915 fn
= builtin_decl_implicit (BUILT_IN_MEMCPY
);
1916 INIT_CUMULATIVE_ARGS (args_so_far_v
, TREE_TYPE (fn
), NULL_RTX
, 0, 3);
1917 args_so_far
= pack_cumulative_args (&args_so_far_v
);
1919 arg
= TYPE_ARG_TYPES (TREE_TYPE (fn
));
1920 for ( ; arg
!= void_list_node
; arg
= TREE_CHAIN (arg
))
1922 machine_mode mode
= TYPE_MODE (TREE_VALUE (arg
));
1923 function_arg_info
arg_info (mode
, /*named=*/true);
1924 rtx tmp
= targetm
.calls
.function_arg (args_so_far
, arg_info
);
1925 if (!tmp
|| !REG_P (tmp
))
1927 if (targetm
.calls
.arg_partial_bytes (args_so_far
, arg_info
))
1929 targetm
.calls
.function_arg_advance (args_so_far
, arg_info
);
1935 /* A subroutine of emit_block_move. Expand a cpymem or movmem pattern;
1936 return true if successful.
1938 X is the destination of the copy or move.
1939 Y is the source of the copy or move.
1940 SIZE is the size of the block to be moved.
1942 MIGHT_OVERLAP indicates this originated with expansion of a
1943 builtin_memmove() and the source and destination blocks may
1948 emit_block_move_via_pattern (rtx x
, rtx y
, rtx size
, unsigned int align
,
1949 unsigned int expected_align
,
1950 HOST_WIDE_INT expected_size
,
1951 unsigned HOST_WIDE_INT min_size
,
1952 unsigned HOST_WIDE_INT max_size
,
1953 unsigned HOST_WIDE_INT probable_max_size
,
1956 if (expected_align
< align
)
1957 expected_align
= align
;
1958 if (expected_size
!= -1)
1960 if ((unsigned HOST_WIDE_INT
)expected_size
> probable_max_size
)
1961 expected_size
= probable_max_size
;
1962 if ((unsigned HOST_WIDE_INT
)expected_size
< min_size
)
1963 expected_size
= min_size
;
1966 /* Since this is a move insn, we don't care about volatility. */
1967 temporary_volatile_ok
v (true);
1969 /* Try the most limited insn first, because there's no point
1970 including more than one in the machine description unless
1971 the more limited one has some advantage. */
1973 opt_scalar_int_mode mode_iter
;
1974 FOR_EACH_MODE_IN_CLASS (mode_iter
, MODE_INT
)
1976 scalar_int_mode mode
= mode_iter
.require ();
1977 enum insn_code code
;
1979 code
= direct_optab_handler (movmem_optab
, mode
);
1981 code
= direct_optab_handler (cpymem_optab
, mode
);
1983 if (code
!= CODE_FOR_nothing
1984 /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
1985 here because if SIZE is less than the mode mask, as it is
1986 returned by the macro, it will definitely be less than the
1987 actual mode mask. Since SIZE is within the Pmode address
1988 space, we limit MODE to Pmode. */
1989 && ((CONST_INT_P (size
)
1990 && ((unsigned HOST_WIDE_INT
) INTVAL (size
)
1991 <= (GET_MODE_MASK (mode
) >> 1)))
1992 || max_size
<= (GET_MODE_MASK (mode
) >> 1)
1993 || GET_MODE_BITSIZE (mode
) >= GET_MODE_BITSIZE (Pmode
)))
1995 class expand_operand ops
[9];
1998 /* ??? When called via emit_block_move_for_call, it'd be
1999 nice if there were some way to inform the backend, so
2000 that it doesn't fail the expansion because it thinks
2001 emitting the libcall would be more efficient. */
2002 nops
= insn_data
[(int) code
].n_generator_args
;
2003 gcc_assert (nops
== 4 || nops
== 6 || nops
== 8 || nops
== 9);
2005 create_fixed_operand (&ops
[0], x
);
2006 create_fixed_operand (&ops
[1], y
);
2007 /* The check above guarantees that this size conversion is valid. */
2008 create_convert_operand_to (&ops
[2], size
, mode
, true);
2009 create_integer_operand (&ops
[3], align
/ BITS_PER_UNIT
);
2012 create_integer_operand (&ops
[4], expected_align
/ BITS_PER_UNIT
);
2013 create_integer_operand (&ops
[5], expected_size
);
2017 create_integer_operand (&ops
[6], min_size
);
2018 /* If we cannot represent the maximal size,
2019 make parameter NULL. */
2020 if ((HOST_WIDE_INT
) max_size
!= -1)
2021 create_integer_operand (&ops
[7], max_size
);
2023 create_fixed_operand (&ops
[7], NULL
);
2027 /* If we cannot represent the maximal size,
2028 make parameter NULL. */
2029 if ((HOST_WIDE_INT
) probable_max_size
!= -1)
2030 create_integer_operand (&ops
[8], probable_max_size
);
2032 create_fixed_operand (&ops
[8], NULL
);
2034 if (maybe_expand_insn (code
, nops
, ops
))
2042 /* A subroutine of emit_block_move. Copy the data via an explicit
2043 loop. This is used only when libcalls are forbidden. */
2044 /* ??? It'd be nice to copy in hunks larger than QImode. */
2047 emit_block_move_via_loop (rtx x
, rtx y
, rtx size
,
2048 unsigned int align ATTRIBUTE_UNUSED
)
2050 rtx_code_label
*cmp_label
, *top_label
;
2051 rtx iter
, x_addr
, y_addr
, tmp
;
2052 machine_mode x_addr_mode
= get_address_mode (x
);
2053 machine_mode y_addr_mode
= get_address_mode (y
);
2054 machine_mode iter_mode
;
2056 iter_mode
= GET_MODE (size
);
2057 if (iter_mode
== VOIDmode
)
2058 iter_mode
= word_mode
;
2060 top_label
= gen_label_rtx ();
2061 cmp_label
= gen_label_rtx ();
2062 iter
= gen_reg_rtx (iter_mode
);
2064 emit_move_insn (iter
, const0_rtx
);
2066 x_addr
= force_operand (XEXP (x
, 0), NULL_RTX
);
2067 y_addr
= force_operand (XEXP (y
, 0), NULL_RTX
);
2068 do_pending_stack_adjust ();
2070 emit_jump (cmp_label
);
2071 emit_label (top_label
);
2073 tmp
= convert_modes (x_addr_mode
, iter_mode
, iter
, true);
2074 x_addr
= simplify_gen_binary (PLUS
, x_addr_mode
, x_addr
, tmp
);
2076 if (x_addr_mode
!= y_addr_mode
)
2077 tmp
= convert_modes (y_addr_mode
, iter_mode
, iter
, true);
2078 y_addr
= simplify_gen_binary (PLUS
, y_addr_mode
, y_addr
, tmp
);
2080 x
= change_address (x
, QImode
, x_addr
);
2081 y
= change_address (y
, QImode
, y_addr
);
2083 emit_move_insn (x
, y
);
2085 tmp
= expand_simple_binop (iter_mode
, PLUS
, iter
, const1_rtx
, iter
,
2086 true, OPTAB_LIB_WIDEN
);
2088 emit_move_insn (iter
, tmp
);
2090 emit_label (cmp_label
);
2092 emit_cmp_and_jump_insns (iter
, size
, LT
, NULL_RTX
, iter_mode
,
2094 profile_probability::guessed_always ()
2095 .apply_scale (9, 10));
2098 /* Expand a call to memcpy or memmove or memcmp, and return the result.
2099 TAILCALL is true if this is a tail call. */
2102 emit_block_op_via_libcall (enum built_in_function fncode
, rtx dst
, rtx src
,
2103 rtx size
, bool tailcall
)
2105 rtx dst_addr
, src_addr
;
2106 tree call_expr
, dst_tree
, src_tree
, size_tree
;
2107 machine_mode size_mode
;
2109 /* Since dst and src are passed to a libcall, mark the corresponding
2110 tree EXPR as addressable. */
2111 tree dst_expr
= MEM_EXPR (dst
);
2112 tree src_expr
= MEM_EXPR (src
);
2114 mark_addressable (dst_expr
);
2116 mark_addressable (src_expr
);
2118 dst_addr
= copy_addr_to_reg (XEXP (dst
, 0));
2119 dst_addr
= convert_memory_address (ptr_mode
, dst_addr
);
2120 dst_tree
= make_tree (ptr_type_node
, dst_addr
);
2122 src_addr
= copy_addr_to_reg (XEXP (src
, 0));
2123 src_addr
= convert_memory_address (ptr_mode
, src_addr
);
2124 src_tree
= make_tree (ptr_type_node
, src_addr
);
2126 size_mode
= TYPE_MODE (sizetype
);
2127 size
= convert_to_mode (size_mode
, size
, 1);
2128 size
= copy_to_mode_reg (size_mode
, size
);
2129 size_tree
= make_tree (sizetype
, size
);
2131 /* It is incorrect to use the libcall calling conventions for calls to
2132 memcpy/memmove/memcmp because they can be provided by the user. */
2133 tree fn
= builtin_decl_implicit (fncode
);
2134 call_expr
= build_call_expr (fn
, 3, dst_tree
, src_tree
, size_tree
);
2135 CALL_EXPR_TAILCALL (call_expr
) = tailcall
;
2137 return expand_call (call_expr
, NULL_RTX
, false);
2140 /* Try to expand cmpstrn or cmpmem operation ICODE with the given operands.
2141 ARG3_TYPE is the type of ARG3_RTX. Return the result rtx on success,
2142 otherwise return null. */
2145 expand_cmpstrn_or_cmpmem (insn_code icode
, rtx target
, rtx arg1_rtx
,
2146 rtx arg2_rtx
, tree arg3_type
, rtx arg3_rtx
,
2147 HOST_WIDE_INT align
)
2149 machine_mode insn_mode
= insn_data
[icode
].operand
[0].mode
;
2151 if (target
&& (!REG_P (target
) || HARD_REGISTER_P (target
)))
2154 class expand_operand ops
[5];
2155 create_output_operand (&ops
[0], target
, insn_mode
);
2156 create_fixed_operand (&ops
[1], arg1_rtx
);
2157 create_fixed_operand (&ops
[2], arg2_rtx
);
2158 create_convert_operand_from (&ops
[3], arg3_rtx
, TYPE_MODE (arg3_type
),
2159 TYPE_UNSIGNED (arg3_type
));
2160 create_integer_operand (&ops
[4], align
);
2161 if (maybe_expand_insn (icode
, 5, ops
))
2162 return ops
[0].value
;
2166 /* Expand a block compare between X and Y with length LEN using the
2167 cmpmem optab, placing the result in TARGET. LEN_TYPE is the type
2168 of the expression that was used to calculate the length. ALIGN
2169 gives the known minimum common alignment. */
2172 emit_block_cmp_via_cmpmem (rtx x
, rtx y
, rtx len
, tree len_type
, rtx target
,
2175 /* Note: The cmpstrnsi pattern, if it exists, is not suitable for
2176 implementing memcmp because it will stop if it encounters two
2178 insn_code icode
= direct_optab_handler (cmpmem_optab
, SImode
);
2180 if (icode
== CODE_FOR_nothing
)
2183 return expand_cmpstrn_or_cmpmem (icode
, target
, x
, y
, len_type
, len
, align
);
2186 /* Emit code to compare a block Y to a block X. This may be done with
2187 string-compare instructions, with multiple scalar instructions,
2188 or with a library call.
2190 Both X and Y must be MEM rtx's. LEN is an rtx that says how long
2191 they are. LEN_TYPE is the type of the expression that was used to
2194 If EQUALITY_ONLY is true, it means we don't have to return the tri-state
2195 value of a normal memcmp call, instead we can just compare for equality.
2196 If FORCE_LIBCALL is true, we should emit a call to memcmp rather than
2199 Optionally, the caller can pass a constfn and associated data in Y_CFN
2200 and Y_CFN_DATA. describing that the second operand being compared is a
2201 known constant and how to obtain its data.
2202 Return the result of the comparison, or NULL_RTX if we failed to
2203 perform the operation. */
2206 emit_block_cmp_hints (rtx x
, rtx y
, rtx len
, tree len_type
, rtx target
,
2207 bool equality_only
, by_pieces_constfn y_cfn
,
2212 if (CONST_INT_P (len
) && INTVAL (len
) == 0)
2215 gcc_assert (MEM_P (x
) && MEM_P (y
));
2216 unsigned int align
= MIN (MEM_ALIGN (x
), MEM_ALIGN (y
));
2217 gcc_assert (align
>= BITS_PER_UNIT
);
2219 x
= adjust_address (x
, BLKmode
, 0);
2220 y
= adjust_address (y
, BLKmode
, 0);
2223 && CONST_INT_P (len
)
2224 && can_do_by_pieces (INTVAL (len
), align
, COMPARE_BY_PIECES
))
2225 result
= compare_by_pieces (x
, y
, INTVAL (len
), target
, align
,
2228 result
= emit_block_cmp_via_cmpmem (x
, y
, len
, len_type
, target
, align
);
2233 /* Copy all or part of a value X into registers starting at REGNO.
2234 The number of registers to be filled is NREGS. */
2237 move_block_to_reg (int regno
, rtx x
, int nregs
, machine_mode mode
)
2242 if (CONSTANT_P (x
) && !targetm
.legitimate_constant_p (mode
, x
))
2243 x
= validize_mem (force_const_mem (mode
, x
));
2245 /* See if the machine can do this with a load multiple insn. */
2246 if (targetm
.have_load_multiple ())
2248 rtx_insn
*last
= get_last_insn ();
2249 rtx first
= gen_rtx_REG (word_mode
, regno
);
2250 if (rtx_insn
*pat
= targetm
.gen_load_multiple (first
, x
,
2257 delete_insns_since (last
);
2260 for (int i
= 0; i
< nregs
; i
++)
2261 emit_move_insn (gen_rtx_REG (word_mode
, regno
+ i
),
2262 operand_subword_force (x
, i
, mode
));
2265 /* Copy all or part of a BLKmode value X out of registers starting at REGNO.
2266 The number of registers to be filled is NREGS. */
2269 move_block_from_reg (int regno
, rtx x
, int nregs
)
2274 /* See if the machine can do this with a store multiple insn. */
2275 if (targetm
.have_store_multiple ())
2277 rtx_insn
*last
= get_last_insn ();
2278 rtx first
= gen_rtx_REG (word_mode
, regno
);
2279 if (rtx_insn
*pat
= targetm
.gen_store_multiple (x
, first
,
2286 delete_insns_since (last
);
2289 for (int i
= 0; i
< nregs
; i
++)
2291 rtx tem
= operand_subword (x
, i
, 1, BLKmode
);
2295 emit_move_insn (tem
, gen_rtx_REG (word_mode
, regno
+ i
));
2299 /* Generate a PARALLEL rtx for a new non-consecutive group of registers from
2300 ORIG, where ORIG is a non-consecutive group of registers represented by
2301 a PARALLEL. The clone is identical to the original except in that the
2302 original set of registers is replaced by a new set of pseudo registers.
2303 The new set has the same modes as the original set. */
2306 gen_group_rtx (rtx orig
)
2311 gcc_assert (GET_CODE (orig
) == PARALLEL
);
2313 length
= XVECLEN (orig
, 0);
2314 tmps
= XALLOCAVEC (rtx
, length
);
2316 /* Skip a NULL entry in first slot. */
2317 i
= XEXP (XVECEXP (orig
, 0, 0), 0) ? 0 : 1;
2322 for (; i
< length
; i
++)
2324 machine_mode mode
= GET_MODE (XEXP (XVECEXP (orig
, 0, i
), 0));
2325 rtx offset
= XEXP (XVECEXP (orig
, 0, i
), 1);
2327 tmps
[i
] = gen_rtx_EXPR_LIST (VOIDmode
, gen_reg_rtx (mode
), offset
);
2330 return gen_rtx_PARALLEL (GET_MODE (orig
), gen_rtvec_v (length
, tmps
));
2333 /* A subroutine of emit_group_load. Arguments as for emit_group_load,
2334 except that values are placed in TMPS[i], and must later be moved
2335 into corresponding XEXP (XVECEXP (DST, 0, i), 0) element. */
2338 emit_group_load_1 (rtx
*tmps
, rtx dst
, rtx orig_src
, tree type
,
2343 machine_mode m
= GET_MODE (orig_src
);
2345 gcc_assert (GET_CODE (dst
) == PARALLEL
);
2348 && !SCALAR_INT_MODE_P (m
)
2349 && !MEM_P (orig_src
)
2350 && GET_CODE (orig_src
) != CONCAT
)
2352 scalar_int_mode imode
;
2353 if (int_mode_for_mode (GET_MODE (orig_src
)).exists (&imode
))
2355 src
= gen_reg_rtx (imode
);
2356 emit_move_insn (gen_lowpart (GET_MODE (orig_src
), src
), orig_src
);
2360 src
= assign_stack_temp (GET_MODE (orig_src
), ssize
);
2361 emit_move_insn (src
, orig_src
);
2363 emit_group_load_1 (tmps
, dst
, src
, type
, ssize
);
2367 /* Check for a NULL entry, used to indicate that the parameter goes
2368 both on the stack and in registers. */
2369 if (XEXP (XVECEXP (dst
, 0, 0), 0))
2374 /* Process the pieces. */
2375 for (i
= start
; i
< XVECLEN (dst
, 0); i
++)
2377 machine_mode mode
= GET_MODE (XEXP (XVECEXP (dst
, 0, i
), 0));
2378 poly_int64 bytepos
= rtx_to_poly_int64 (XEXP (XVECEXP (dst
, 0, i
), 1));
2379 poly_int64 bytelen
= GET_MODE_SIZE (mode
);
2380 poly_int64 shift
= 0;
2382 /* Handle trailing fragments that run over the size of the struct.
2383 It's the target's responsibility to make sure that the fragment
2384 cannot be strictly smaller in some cases and strictly larger
2386 gcc_checking_assert (ordered_p (bytepos
+ bytelen
, ssize
));
2387 if (known_size_p (ssize
) && maybe_gt (bytepos
+ bytelen
, ssize
))
2389 /* Arrange to shift the fragment to where it belongs.
2390 extract_bit_field loads to the lsb of the reg. */
2392 #ifdef BLOCK_REG_PADDING
2393 BLOCK_REG_PADDING (GET_MODE (orig_src
), type
, i
== start
)
2394 == (BYTES_BIG_ENDIAN
? PAD_UPWARD
: PAD_DOWNWARD
)
2399 shift
= (bytelen
- (ssize
- bytepos
)) * BITS_PER_UNIT
;
2400 bytelen
= ssize
- bytepos
;
2401 gcc_assert (maybe_gt (bytelen
, 0));
2404 /* If we won't be loading directly from memory, protect the real source
2405 from strange tricks we might play; but make sure that the source can
2406 be loaded directly into the destination. */
2408 if (!MEM_P (orig_src
)
2409 && (!CONSTANT_P (orig_src
)
2410 || (GET_MODE (orig_src
) != mode
2411 && GET_MODE (orig_src
) != VOIDmode
)))
2413 if (GET_MODE (orig_src
) == VOIDmode
)
2414 src
= gen_reg_rtx (mode
);
2416 src
= gen_reg_rtx (GET_MODE (orig_src
));
2418 emit_move_insn (src
, orig_src
);
2421 /* Optimize the access just a bit. */
2423 && (! targetm
.slow_unaligned_access (mode
, MEM_ALIGN (src
))
2424 || MEM_ALIGN (src
) >= GET_MODE_ALIGNMENT (mode
))
2425 && multiple_p (bytepos
* BITS_PER_UNIT
, GET_MODE_ALIGNMENT (mode
))
2426 && known_eq (bytelen
, GET_MODE_SIZE (mode
)))
2428 tmps
[i
] = gen_reg_rtx (mode
);
2429 emit_move_insn (tmps
[i
], adjust_address (src
, mode
, bytepos
));
2431 else if (COMPLEX_MODE_P (mode
)
2432 && GET_MODE (src
) == mode
2433 && known_eq (bytelen
, GET_MODE_SIZE (mode
)))
2434 /* Let emit_move_complex do the bulk of the work. */
2436 else if (GET_CODE (src
) == CONCAT
)
2438 poly_int64 slen
= GET_MODE_SIZE (GET_MODE (src
));
2439 poly_int64 slen0
= GET_MODE_SIZE (GET_MODE (XEXP (src
, 0)));
2443 if (can_div_trunc_p (bytepos
, slen0
, &elt
, &subpos
)
2444 && known_le (subpos
+ bytelen
, slen0
))
2446 /* The following assumes that the concatenated objects all
2447 have the same size. In this case, a simple calculation
2448 can be used to determine the object and the bit field
2450 tmps
[i
] = XEXP (src
, elt
);
2451 if (maybe_ne (subpos
, 0)
2452 || maybe_ne (subpos
+ bytelen
, slen0
)
2453 || (!CONSTANT_P (tmps
[i
])
2454 && (!REG_P (tmps
[i
]) || GET_MODE (tmps
[i
]) != mode
)))
2455 tmps
[i
] = extract_bit_field (tmps
[i
], bytelen
* BITS_PER_UNIT
,
2456 subpos
* BITS_PER_UNIT
,
2457 1, NULL_RTX
, mode
, mode
, false,
2464 gcc_assert (known_eq (bytepos
, 0));
2465 mem
= assign_stack_temp (GET_MODE (src
), slen
);
2466 emit_move_insn (mem
, src
);
2467 tmps
[i
] = extract_bit_field (mem
, bytelen
* BITS_PER_UNIT
,
2468 0, 1, NULL_RTX
, mode
, mode
, false,
2472 /* FIXME: A SIMD parallel will eventually lead to a subreg of a
2473 SIMD register, which is currently broken. While we get GCC
2474 to emit proper RTL for these cases, let's dump to memory. */
2475 else if (VECTOR_MODE_P (GET_MODE (dst
))
2478 poly_uint64 slen
= GET_MODE_SIZE (GET_MODE (src
));
2481 mem
= assign_stack_temp (GET_MODE (src
), slen
);
2482 emit_move_insn (mem
, src
);
2483 tmps
[i
] = adjust_address (mem
, mode
, bytepos
);
2485 else if (CONSTANT_P (src
) && GET_MODE (dst
) != BLKmode
2486 && XVECLEN (dst
, 0) > 1)
2487 tmps
[i
] = simplify_gen_subreg (mode
, src
, GET_MODE (dst
), bytepos
);
2488 else if (CONSTANT_P (src
))
2490 if (known_eq (bytelen
, ssize
))
2496 /* TODO: const_wide_int can have sizes other than this... */
2497 gcc_assert (known_eq (2 * bytelen
, ssize
));
2498 split_double (src
, &first
, &second
);
2505 else if (REG_P (src
) && GET_MODE (src
) == mode
)
2508 tmps
[i
] = extract_bit_field (src
, bytelen
* BITS_PER_UNIT
,
2509 bytepos
* BITS_PER_UNIT
, 1, NULL_RTX
,
2510 mode
, mode
, false, NULL
);
2512 if (maybe_ne (shift
, 0))
2513 tmps
[i
] = expand_shift (LSHIFT_EXPR
, mode
, tmps
[i
],
2518 /* Emit code to move a block SRC of type TYPE to a block DST,
2519 where DST is non-consecutive registers represented by a PARALLEL.
2520 SSIZE represents the total size of block ORIG_SRC in bytes, or -1
2524 emit_group_load (rtx dst
, rtx src
, tree type
, poly_int64 ssize
)
2529 tmps
= XALLOCAVEC (rtx
, XVECLEN (dst
, 0));
2530 emit_group_load_1 (tmps
, dst
, src
, type
, ssize
);
2532 /* Copy the extracted pieces into the proper (probable) hard regs. */
2533 for (i
= 0; i
< XVECLEN (dst
, 0); i
++)
2535 rtx d
= XEXP (XVECEXP (dst
, 0, i
), 0);
2538 emit_move_insn (d
, tmps
[i
]);
2542 /* Similar, but load SRC into new pseudos in a format that looks like
2543 PARALLEL. This can later be fed to emit_group_move to get things
2544 in the right place. */
2547 emit_group_load_into_temps (rtx parallel
, rtx src
, tree type
, poly_int64 ssize
)
2552 vec
= rtvec_alloc (XVECLEN (parallel
, 0));
2553 emit_group_load_1 (&RTVEC_ELT (vec
, 0), parallel
, src
, type
, ssize
);
2555 /* Convert the vector to look just like the original PARALLEL, except
2556 with the computed values. */
2557 for (i
= 0; i
< XVECLEN (parallel
, 0); i
++)
2559 rtx e
= XVECEXP (parallel
, 0, i
);
2560 rtx d
= XEXP (e
, 0);
2564 d
= force_reg (GET_MODE (d
), RTVEC_ELT (vec
, i
));
2565 e
= alloc_EXPR_LIST (REG_NOTE_KIND (e
), d
, XEXP (e
, 1));
2567 RTVEC_ELT (vec
, i
) = e
;
2570 return gen_rtx_PARALLEL (GET_MODE (parallel
), vec
);
2573 /* Emit code to move a block SRC to block DST, where SRC and DST are
2574 non-consecutive groups of registers, each represented by a PARALLEL. */
2577 emit_group_move (rtx dst
, rtx src
)
2581 gcc_assert (GET_CODE (src
) == PARALLEL
2582 && GET_CODE (dst
) == PARALLEL
2583 && XVECLEN (src
, 0) == XVECLEN (dst
, 0));
2585 /* Skip first entry if NULL. */
2586 for (i
= XEXP (XVECEXP (src
, 0, 0), 0) ? 0 : 1; i
< XVECLEN (src
, 0); i
++)
2587 emit_move_insn (XEXP (XVECEXP (dst
, 0, i
), 0),
2588 XEXP (XVECEXP (src
, 0, i
), 0));
2591 /* Move a group of registers represented by a PARALLEL into pseudos. */
2594 emit_group_move_into_temps (rtx src
)
2596 rtvec vec
= rtvec_alloc (XVECLEN (src
, 0));
2599 for (i
= 0; i
< XVECLEN (src
, 0); i
++)
2601 rtx e
= XVECEXP (src
, 0, i
);
2602 rtx d
= XEXP (e
, 0);
2605 e
= alloc_EXPR_LIST (REG_NOTE_KIND (e
), copy_to_reg (d
), XEXP (e
, 1));
2606 RTVEC_ELT (vec
, i
) = e
;
2609 return gen_rtx_PARALLEL (GET_MODE (src
), vec
);
2612 /* Emit code to move a block SRC to a block ORIG_DST of type TYPE,
2613 where SRC is non-consecutive registers represented by a PARALLEL.
2614 SSIZE represents the total size of block ORIG_DST, or -1 if not
2618 emit_group_store (rtx orig_dst
, rtx src
, tree type ATTRIBUTE_UNUSED
,
2622 int start
, finish
, i
;
2623 machine_mode m
= GET_MODE (orig_dst
);
2625 gcc_assert (GET_CODE (src
) == PARALLEL
);
2627 if (!SCALAR_INT_MODE_P (m
)
2628 && !MEM_P (orig_dst
) && GET_CODE (orig_dst
) != CONCAT
)
2630 scalar_int_mode imode
;
2631 if (int_mode_for_mode (GET_MODE (orig_dst
)).exists (&imode
))
2633 dst
= gen_reg_rtx (imode
);
2634 emit_group_store (dst
, src
, type
, ssize
);
2635 dst
= gen_lowpart (GET_MODE (orig_dst
), dst
);
2639 dst
= assign_stack_temp (GET_MODE (orig_dst
), ssize
);
2640 emit_group_store (dst
, src
, type
, ssize
);
2642 emit_move_insn (orig_dst
, dst
);
2646 /* Check for a NULL entry, used to indicate that the parameter goes
2647 both on the stack and in registers. */
2648 if (XEXP (XVECEXP (src
, 0, 0), 0))
2652 finish
= XVECLEN (src
, 0);
2654 tmps
= XALLOCAVEC (rtx
, finish
);
2656 /* Copy the (probable) hard regs into pseudos. */
2657 for (i
= start
; i
< finish
; i
++)
2659 rtx reg
= XEXP (XVECEXP (src
, 0, i
), 0);
2660 if (!REG_P (reg
) || REGNO (reg
) < FIRST_PSEUDO_REGISTER
)
2662 tmps
[i
] = gen_reg_rtx (GET_MODE (reg
));
2663 emit_move_insn (tmps
[i
], reg
);
2669 /* If we won't be storing directly into memory, protect the real destination
2670 from strange tricks we might play. */
2672 if (GET_CODE (dst
) == PARALLEL
)
2676 /* We can get a PARALLEL dst if there is a conditional expression in
2677 a return statement. In that case, the dst and src are the same,
2678 so no action is necessary. */
2679 if (rtx_equal_p (dst
, src
))
2682 /* It is unclear if we can ever reach here, but we may as well handle
2683 it. Allocate a temporary, and split this into a store/load to/from
2685 temp
= assign_stack_temp (GET_MODE (dst
), ssize
);
2686 emit_group_store (temp
, src
, type
, ssize
);
2687 emit_group_load (dst
, temp
, type
, ssize
);
2690 else if (!MEM_P (dst
) && GET_CODE (dst
) != CONCAT
)
2692 machine_mode outer
= GET_MODE (dst
);
2698 if (!REG_P (dst
) || REGNO (dst
) < FIRST_PSEUDO_REGISTER
)
2699 dst
= gen_reg_rtx (outer
);
2701 /* Make life a bit easier for combine. */
2702 /* If the first element of the vector is the low part
2703 of the destination mode, use a paradoxical subreg to
2704 initialize the destination. */
2707 inner
= GET_MODE (tmps
[start
]);
2708 bytepos
= subreg_lowpart_offset (inner
, outer
);
2709 if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src
, 0, start
), 1)),
2712 temp
= simplify_gen_subreg (outer
, tmps
[start
],
2716 emit_move_insn (dst
, temp
);
2723 /* If the first element wasn't the low part, try the last. */
2725 && start
< finish
- 1)
2727 inner
= GET_MODE (tmps
[finish
- 1]);
2728 bytepos
= subreg_lowpart_offset (inner
, outer
);
2729 if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src
, 0,
2733 temp
= simplify_gen_subreg (outer
, tmps
[finish
- 1],
2737 emit_move_insn (dst
, temp
);
2744 /* Otherwise, simply initialize the result to zero. */
2746 emit_move_insn (dst
, CONST0_RTX (outer
));
2749 /* Process the pieces. */
2750 for (i
= start
; i
< finish
; i
++)
2752 poly_int64 bytepos
= rtx_to_poly_int64 (XEXP (XVECEXP (src
, 0, i
), 1));
2753 machine_mode mode
= GET_MODE (tmps
[i
]);
2754 poly_int64 bytelen
= GET_MODE_SIZE (mode
);
2755 poly_uint64 adj_bytelen
;
2758 /* Handle trailing fragments that run over the size of the struct.
2759 It's the target's responsibility to make sure that the fragment
2760 cannot be strictly smaller in some cases and strictly larger
2762 gcc_checking_assert (ordered_p (bytepos
+ bytelen
, ssize
));
2763 if (known_size_p (ssize
) && maybe_gt (bytepos
+ bytelen
, ssize
))
2764 adj_bytelen
= ssize
- bytepos
;
2766 adj_bytelen
= bytelen
;
2768 if (GET_CODE (dst
) == CONCAT
)
2770 if (known_le (bytepos
+ adj_bytelen
,
2771 GET_MODE_SIZE (GET_MODE (XEXP (dst
, 0)))))
2772 dest
= XEXP (dst
, 0);
2773 else if (known_ge (bytepos
, GET_MODE_SIZE (GET_MODE (XEXP (dst
, 0)))))
2775 bytepos
-= GET_MODE_SIZE (GET_MODE (XEXP (dst
, 0)));
2776 dest
= XEXP (dst
, 1);
2780 machine_mode dest_mode
= GET_MODE (dest
);
2781 machine_mode tmp_mode
= GET_MODE (tmps
[i
]);
2783 gcc_assert (known_eq (bytepos
, 0) && XVECLEN (src
, 0));
2785 if (GET_MODE_ALIGNMENT (dest_mode
)
2786 >= GET_MODE_ALIGNMENT (tmp_mode
))
2788 dest
= assign_stack_temp (dest_mode
,
2789 GET_MODE_SIZE (dest_mode
));
2790 emit_move_insn (adjust_address (dest
,
2798 dest
= assign_stack_temp (tmp_mode
,
2799 GET_MODE_SIZE (tmp_mode
));
2800 emit_move_insn (dest
, tmps
[i
]);
2801 dst
= adjust_address (dest
, dest_mode
, bytepos
);
2807 /* Handle trailing fragments that run over the size of the struct. */
2808 if (known_size_p (ssize
) && maybe_gt (bytepos
+ bytelen
, ssize
))
2810 /* store_bit_field always takes its value from the lsb.
2811 Move the fragment to the lsb if it's not already there. */
2813 #ifdef BLOCK_REG_PADDING
2814 BLOCK_REG_PADDING (GET_MODE (orig_dst
), type
, i
== start
)
2815 == (BYTES_BIG_ENDIAN
? PAD_UPWARD
: PAD_DOWNWARD
)
2821 poly_int64 shift
= (bytelen
- (ssize
- bytepos
)) * BITS_PER_UNIT
;
2822 tmps
[i
] = expand_shift (RSHIFT_EXPR
, mode
, tmps
[i
],
2826 /* Make sure not to write past the end of the struct. */
2827 store_bit_field (dest
,
2828 adj_bytelen
* BITS_PER_UNIT
, bytepos
* BITS_PER_UNIT
,
2829 bytepos
* BITS_PER_UNIT
, ssize
* BITS_PER_UNIT
- 1,
2830 VOIDmode
, tmps
[i
], false);
2833 /* Optimize the access just a bit. */
2834 else if (MEM_P (dest
)
2835 && (!targetm
.slow_unaligned_access (mode
, MEM_ALIGN (dest
))
2836 || MEM_ALIGN (dest
) >= GET_MODE_ALIGNMENT (mode
))
2837 && multiple_p (bytepos
* BITS_PER_UNIT
,
2838 GET_MODE_ALIGNMENT (mode
))
2839 && known_eq (bytelen
, GET_MODE_SIZE (mode
)))
2840 emit_move_insn (adjust_address (dest
, mode
, bytepos
), tmps
[i
]);
2843 store_bit_field (dest
, bytelen
* BITS_PER_UNIT
, bytepos
* BITS_PER_UNIT
,
2844 0, 0, mode
, tmps
[i
], false);
2847 /* Copy from the pseudo into the (probable) hard reg. */
2848 if (orig_dst
!= dst
)
2849 emit_move_insn (orig_dst
, dst
);
2852 /* Return a form of X that does not use a PARALLEL. TYPE is the type
2853 of the value stored in X. */
2856 maybe_emit_group_store (rtx x
, tree type
)
2858 machine_mode mode
= TYPE_MODE (type
);
2859 gcc_checking_assert (GET_MODE (x
) == VOIDmode
|| GET_MODE (x
) == mode
);
2860 if (GET_CODE (x
) == PARALLEL
)
2862 rtx result
= gen_reg_rtx (mode
);
2863 emit_group_store (result
, x
, type
, int_size_in_bytes (type
));
2869 /* Copy a BLKmode object of TYPE out of a register SRCREG into TARGET.
2871 This is used on targets that return BLKmode values in registers. */
2874 copy_blkmode_from_reg (rtx target
, rtx srcreg
, tree type
)
2876 unsigned HOST_WIDE_INT bytes
= int_size_in_bytes (type
);
2877 rtx src
= NULL
, dst
= NULL
;
2878 unsigned HOST_WIDE_INT bitsize
= MIN (TYPE_ALIGN (type
), BITS_PER_WORD
);
2879 unsigned HOST_WIDE_INT bitpos
, xbitpos
, padding_correction
= 0;
2880 /* No current ABI uses variable-sized modes to pass a BLKmnode type. */
2881 fixed_size_mode mode
= as_a
<fixed_size_mode
> (GET_MODE (srcreg
));
2882 fixed_size_mode tmode
= as_a
<fixed_size_mode
> (GET_MODE (target
));
2883 fixed_size_mode copy_mode
;
2885 /* BLKmode registers created in the back-end shouldn't have survived. */
2886 gcc_assert (mode
!= BLKmode
);
2888 /* If the structure doesn't take up a whole number of words, see whether
2889 SRCREG is padded on the left or on the right. If it's on the left,
2890 set PADDING_CORRECTION to the number of bits to skip.
2892 In most ABIs, the structure will be returned at the least end of
2893 the register, which translates to right padding on little-endian
2894 targets and left padding on big-endian targets. The opposite
2895 holds if the structure is returned at the most significant
2896 end of the register. */
2897 if (bytes
% UNITS_PER_WORD
!= 0
2898 && (targetm
.calls
.return_in_msb (type
)
2900 : BYTES_BIG_ENDIAN
))
2902 = (BITS_PER_WORD
- ((bytes
% UNITS_PER_WORD
) * BITS_PER_UNIT
));
2904 /* We can use a single move if we have an exact mode for the size. */
2905 else if (MEM_P (target
)
2906 && (!targetm
.slow_unaligned_access (mode
, MEM_ALIGN (target
))
2907 || MEM_ALIGN (target
) >= GET_MODE_ALIGNMENT (mode
))
2908 && bytes
== GET_MODE_SIZE (mode
))
2910 emit_move_insn (adjust_address (target
, mode
, 0), srcreg
);
2914 /* And if we additionally have the same mode for a register. */
2915 else if (REG_P (target
)
2916 && GET_MODE (target
) == mode
2917 && bytes
== GET_MODE_SIZE (mode
))
2919 emit_move_insn (target
, srcreg
);
2923 /* This code assumes srcreg is at least a full word. If it isn't, copy it
2924 into a new pseudo which is a full word. */
2925 if (GET_MODE_SIZE (mode
) < UNITS_PER_WORD
)
2927 srcreg
= convert_to_mode (word_mode
, srcreg
, TYPE_UNSIGNED (type
));
2931 /* Copy the structure BITSIZE bits at a time. If the target lives in
2932 memory, take care of not reading/writing past its end by selecting
2933 a copy mode suited to BITSIZE. This should always be possible given
2936 If the target lives in register, make sure not to select a copy mode
2937 larger than the mode of the register.
2939 We could probably emit more efficient code for machines which do not use
2940 strict alignment, but it doesn't seem worth the effort at the current
2943 copy_mode
= word_mode
;
2946 opt_scalar_int_mode mem_mode
= int_mode_for_size (bitsize
, 1);
2947 if (mem_mode
.exists ())
2948 copy_mode
= mem_mode
.require ();
2950 else if (REG_P (target
) && GET_MODE_BITSIZE (tmode
) < BITS_PER_WORD
)
2953 for (bitpos
= 0, xbitpos
= padding_correction
;
2954 bitpos
< bytes
* BITS_PER_UNIT
;
2955 bitpos
+= bitsize
, xbitpos
+= bitsize
)
2957 /* We need a new source operand each time xbitpos is on a
2958 word boundary and when xbitpos == padding_correction
2959 (the first time through). */
2960 if (xbitpos
% BITS_PER_WORD
== 0 || xbitpos
== padding_correction
)
2961 src
= operand_subword_force (srcreg
, xbitpos
/ BITS_PER_WORD
, mode
);
2963 /* We need a new destination operand each time bitpos is on
2965 if (REG_P (target
) && GET_MODE_BITSIZE (tmode
) < BITS_PER_WORD
)
2967 else if (bitpos
% BITS_PER_WORD
== 0)
2968 dst
= operand_subword (target
, bitpos
/ BITS_PER_WORD
, 1, tmode
);
2970 /* Use xbitpos for the source extraction (right justified) and
2971 bitpos for the destination store (left justified). */
2972 store_bit_field (dst
, bitsize
, bitpos
% BITS_PER_WORD
, 0, 0, copy_mode
,
2973 extract_bit_field (src
, bitsize
,
2974 xbitpos
% BITS_PER_WORD
, 1,
2975 NULL_RTX
, copy_mode
, copy_mode
,
2981 /* Copy BLKmode value SRC into a register of mode MODE_IN. Return the
2982 register if it contains any data, otherwise return null.
2984 This is used on targets that return BLKmode values in registers. */
2987 copy_blkmode_to_reg (machine_mode mode_in
, tree src
)
2990 unsigned HOST_WIDE_INT bitpos
, xbitpos
, padding_correction
= 0, bytes
;
2991 unsigned int bitsize
;
2992 rtx
*dst_words
, dst
, x
, src_word
= NULL_RTX
, dst_word
= NULL_RTX
;
2993 /* No current ABI uses variable-sized modes to pass a BLKmnode type. */
2994 fixed_size_mode mode
= as_a
<fixed_size_mode
> (mode_in
);
2995 fixed_size_mode dst_mode
;
2996 scalar_int_mode min_mode
;
2998 gcc_assert (TYPE_MODE (TREE_TYPE (src
)) == BLKmode
);
3000 x
= expand_normal (src
);
3002 bytes
= arg_int_size_in_bytes (TREE_TYPE (src
));
3006 /* If the structure doesn't take up a whole number of words, see
3007 whether the register value should be padded on the left or on
3008 the right. Set PADDING_CORRECTION to the number of padding
3009 bits needed on the left side.
3011 In most ABIs, the structure will be returned at the least end of
3012 the register, which translates to right padding on little-endian
3013 targets and left padding on big-endian targets. The opposite
3014 holds if the structure is returned at the most significant
3015 end of the register. */
3016 if (bytes
% UNITS_PER_WORD
!= 0
3017 && (targetm
.calls
.return_in_msb (TREE_TYPE (src
))
3019 : BYTES_BIG_ENDIAN
))
3020 padding_correction
= (BITS_PER_WORD
- ((bytes
% UNITS_PER_WORD
)
3023 n_regs
= (bytes
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
3024 dst_words
= XALLOCAVEC (rtx
, n_regs
);
3025 bitsize
= MIN (TYPE_ALIGN (TREE_TYPE (src
)), BITS_PER_WORD
);
3026 min_mode
= smallest_int_mode_for_size (bitsize
);
3028 /* Copy the structure BITSIZE bits at a time. */
3029 for (bitpos
= 0, xbitpos
= padding_correction
;
3030 bitpos
< bytes
* BITS_PER_UNIT
;
3031 bitpos
+= bitsize
, xbitpos
+= bitsize
)
3033 /* We need a new destination pseudo each time xbitpos is
3034 on a word boundary and when xbitpos == padding_correction
3035 (the first time through). */
3036 if (xbitpos
% BITS_PER_WORD
== 0
3037 || xbitpos
== padding_correction
)
3039 /* Generate an appropriate register. */
3040 dst_word
= gen_reg_rtx (word_mode
);
3041 dst_words
[xbitpos
/ BITS_PER_WORD
] = dst_word
;
3043 /* Clear the destination before we move anything into it. */
3044 emit_move_insn (dst_word
, CONST0_RTX (word_mode
));
3047 /* Find the largest integer mode that can be used to copy all or as
3048 many bits as possible of the structure if the target supports larger
3049 copies. There are too many corner cases here w.r.t to alignments on
3050 the read/writes. So if there is any padding just use single byte
3052 opt_scalar_int_mode mode_iter
;
3053 if (padding_correction
== 0 && !STRICT_ALIGNMENT
)
3055 FOR_EACH_MODE_FROM (mode_iter
, min_mode
)
3057 unsigned int msize
= GET_MODE_BITSIZE (mode_iter
.require ());
3058 if (msize
<= ((bytes
* BITS_PER_UNIT
) - bitpos
)
3059 && msize
<= BITS_PER_WORD
)
3066 /* We need a new source operand each time bitpos is on a word
3068 if (bitpos
% BITS_PER_WORD
== 0)
3069 src_word
= operand_subword_force (x
, bitpos
/ BITS_PER_WORD
, BLKmode
);
3071 /* Use bitpos for the source extraction (left justified) and
3072 xbitpos for the destination store (right justified). */
3073 store_bit_field (dst_word
, bitsize
, xbitpos
% BITS_PER_WORD
,
3075 extract_bit_field (src_word
, bitsize
,
3076 bitpos
% BITS_PER_WORD
, 1,
3077 NULL_RTX
, word_mode
, word_mode
,
3082 if (mode
== BLKmode
)
3084 /* Find the smallest integer mode large enough to hold the
3085 entire structure. */
3086 opt_scalar_int_mode mode_iter
;
3087 FOR_EACH_MODE_IN_CLASS (mode_iter
, MODE_INT
)
3088 if (GET_MODE_SIZE (mode_iter
.require ()) >= bytes
)
3091 /* A suitable mode should have been found. */
3092 mode
= mode_iter
.require ();
3095 if (GET_MODE_SIZE (mode
) < GET_MODE_SIZE (word_mode
))
3096 dst_mode
= word_mode
;
3099 dst
= gen_reg_rtx (dst_mode
);
3101 for (i
= 0; i
< n_regs
; i
++)
3102 emit_move_insn (operand_subword (dst
, i
, 0, dst_mode
), dst_words
[i
]);
3104 if (mode
!= dst_mode
)
3105 dst
= gen_lowpart (mode
, dst
);
3110 /* Add a USE expression for REG to the (possibly empty) list pointed
3111 to by CALL_FUSAGE. REG must denote a hard register. */
3114 use_reg_mode (rtx
*call_fusage
, rtx reg
, machine_mode mode
)
3116 gcc_assert (REG_P (reg
));
3118 if (!HARD_REGISTER_P (reg
))
3122 = gen_rtx_EXPR_LIST (mode
, gen_rtx_USE (VOIDmode
, reg
), *call_fusage
);
3125 /* Add a CLOBBER expression for REG to the (possibly empty) list pointed
3126 to by CALL_FUSAGE. REG must denote a hard register. */
3129 clobber_reg_mode (rtx
*call_fusage
, rtx reg
, machine_mode mode
)
3131 gcc_assert (REG_P (reg
) && REGNO (reg
) < FIRST_PSEUDO_REGISTER
);
3134 = gen_rtx_EXPR_LIST (mode
, gen_rtx_CLOBBER (VOIDmode
, reg
), *call_fusage
);
3137 /* Add USE expressions to *CALL_FUSAGE for each of NREGS consecutive regs,
3138 starting at REGNO. All of these registers must be hard registers. */
3141 use_regs (rtx
*call_fusage
, int regno
, int nregs
)
3145 gcc_assert (regno
+ nregs
<= FIRST_PSEUDO_REGISTER
);
3147 for (i
= 0; i
< nregs
; i
++)
3148 use_reg (call_fusage
, regno_reg_rtx
[regno
+ i
]);
3151 /* Add USE expressions to *CALL_FUSAGE for each REG contained in the
3152 PARALLEL REGS. This is for calls that pass values in multiple
3153 non-contiguous locations. The Irix 6 ABI has examples of this. */
3156 use_group_regs (rtx
*call_fusage
, rtx regs
)
3160 for (i
= 0; i
< XVECLEN (regs
, 0); i
++)
3162 rtx reg
= XEXP (XVECEXP (regs
, 0, i
), 0);
3164 /* A NULL entry means the parameter goes both on the stack and in
3165 registers. This can also be a MEM for targets that pass values
3166 partially on the stack and partially in registers. */
3167 if (reg
!= 0 && REG_P (reg
))
3168 use_reg (call_fusage
, reg
);
3172 /* Return the defining gimple statement for SSA_NAME NAME if it is an
3173 assigment and the code of the expresion on the RHS is CODE. Return
3177 get_def_for_expr (tree name
, enum tree_code code
)
3181 if (TREE_CODE (name
) != SSA_NAME
)
3184 def_stmt
= get_gimple_for_ssa_name (name
);
3186 || gimple_assign_rhs_code (def_stmt
) != code
)
3192 /* Return the defining gimple statement for SSA_NAME NAME if it is an
3193 assigment and the class of the expresion on the RHS is CLASS. Return
3197 get_def_for_expr_class (tree name
, enum tree_code_class tclass
)
3201 if (TREE_CODE (name
) != SSA_NAME
)
3204 def_stmt
= get_gimple_for_ssa_name (name
);
3206 || TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt
)) != tclass
)
3212 /* Write zeros through the storage of OBJECT. If OBJECT has BLKmode, SIZE is
3213 its length in bytes. */
3216 clear_storage_hints (rtx object
, rtx size
, enum block_op_methods method
,
3217 unsigned int expected_align
, HOST_WIDE_INT expected_size
,
3218 unsigned HOST_WIDE_INT min_size
,
3219 unsigned HOST_WIDE_INT max_size
,
3220 unsigned HOST_WIDE_INT probable_max_size
,
3223 machine_mode mode
= GET_MODE (object
);
3226 gcc_assert (method
== BLOCK_OP_NORMAL
|| method
== BLOCK_OP_TAILCALL
);
3228 /* If OBJECT is not BLKmode and SIZE is the same size as its mode,
3229 just move a zero. Otherwise, do this a piece at a time. */
3230 poly_int64 size_val
;
3232 && poly_int_rtx_p (size
, &size_val
)
3233 && known_eq (size_val
, GET_MODE_SIZE (mode
)))
3235 rtx zero
= CONST0_RTX (mode
);
3238 emit_move_insn (object
, zero
);
3242 if (COMPLEX_MODE_P (mode
))
3244 zero
= CONST0_RTX (GET_MODE_INNER (mode
));
3247 write_complex_part (object
, zero
, 0);
3248 write_complex_part (object
, zero
, 1);
3254 if (size
== const0_rtx
)
3257 align
= MEM_ALIGN (object
);
3259 if (CONST_INT_P (size
)
3260 && targetm
.use_by_pieces_infrastructure_p (INTVAL (size
), align
,
3262 optimize_insn_for_speed_p ()))
3263 clear_by_pieces (object
, INTVAL (size
), align
);
3264 else if (set_storage_via_setmem (object
, size
, const0_rtx
, align
,
3265 expected_align
, expected_size
,
3266 min_size
, max_size
, probable_max_size
))
3268 else if (try_store_by_multiple_pieces (object
, size
, ctz_size
,
3270 NULL_RTX
, 0, align
))
3272 else if (ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (object
)))
3273 return set_storage_via_libcall (object
, size
, const0_rtx
,
3274 method
== BLOCK_OP_TAILCALL
);
3282 clear_storage (rtx object
, rtx size
, enum block_op_methods method
)
3284 unsigned HOST_WIDE_INT max
, min
= 0;
3285 if (GET_CODE (size
) == CONST_INT
)
3286 min
= max
= UINTVAL (size
);
3288 max
= GET_MODE_MASK (GET_MODE (size
));
3289 return clear_storage_hints (object
, size
, method
, 0, -1, min
, max
, max
, 0);
3293 /* A subroutine of clear_storage. Expand a call to memset.
3294 Return the return value of memset, 0 otherwise. */
3297 set_storage_via_libcall (rtx object
, rtx size
, rtx val
, bool tailcall
)
3299 tree call_expr
, fn
, object_tree
, size_tree
, val_tree
;
3300 machine_mode size_mode
;
3302 object
= copy_addr_to_reg (XEXP (object
, 0));
3303 object_tree
= make_tree (ptr_type_node
, object
);
3305 if (!CONST_INT_P (val
))
3306 val
= convert_to_mode (TYPE_MODE (integer_type_node
), val
, 1);
3307 val_tree
= make_tree (integer_type_node
, val
);
3309 size_mode
= TYPE_MODE (sizetype
);
3310 size
= convert_to_mode (size_mode
, size
, 1);
3311 size
= copy_to_mode_reg (size_mode
, size
);
3312 size_tree
= make_tree (sizetype
, size
);
3314 /* It is incorrect to use the libcall calling conventions for calls to
3315 memset because it can be provided by the user. */
3316 fn
= builtin_decl_implicit (BUILT_IN_MEMSET
);
3317 call_expr
= build_call_expr (fn
, 3, object_tree
, val_tree
, size_tree
);
3318 CALL_EXPR_TAILCALL (call_expr
) = tailcall
;
3320 return expand_call (call_expr
, NULL_RTX
, false);
3323 /* Expand a setmem pattern; return true if successful. */
3326 set_storage_via_setmem (rtx object
, rtx size
, rtx val
, unsigned int align
,
3327 unsigned int expected_align
, HOST_WIDE_INT expected_size
,
3328 unsigned HOST_WIDE_INT min_size
,
3329 unsigned HOST_WIDE_INT max_size
,
3330 unsigned HOST_WIDE_INT probable_max_size
)
3332 /* Try the most limited insn first, because there's no point
3333 including more than one in the machine description unless
3334 the more limited one has some advantage. */
3336 if (expected_align
< align
)
3337 expected_align
= align
;
3338 if (expected_size
!= -1)
3340 if ((unsigned HOST_WIDE_INT
)expected_size
> max_size
)
3341 expected_size
= max_size
;
3342 if ((unsigned HOST_WIDE_INT
)expected_size
< min_size
)
3343 expected_size
= min_size
;
3346 opt_scalar_int_mode mode_iter
;
3347 FOR_EACH_MODE_IN_CLASS (mode_iter
, MODE_INT
)
3349 scalar_int_mode mode
= mode_iter
.require ();
3350 enum insn_code code
= direct_optab_handler (setmem_optab
, mode
);
3352 if (code
!= CODE_FOR_nothing
3353 /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
3354 here because if SIZE is less than the mode mask, as it is
3355 returned by the macro, it will definitely be less than the
3356 actual mode mask. Since SIZE is within the Pmode address
3357 space, we limit MODE to Pmode. */
3358 && ((CONST_INT_P (size
)
3359 && ((unsigned HOST_WIDE_INT
) INTVAL (size
)
3360 <= (GET_MODE_MASK (mode
) >> 1)))
3361 || max_size
<= (GET_MODE_MASK (mode
) >> 1)
3362 || GET_MODE_BITSIZE (mode
) >= GET_MODE_BITSIZE (Pmode
)))
3364 class expand_operand ops
[9];
3367 nops
= insn_data
[(int) code
].n_generator_args
;
3368 gcc_assert (nops
== 4 || nops
== 6 || nops
== 8 || nops
== 9);
3370 create_fixed_operand (&ops
[0], object
);
3371 /* The check above guarantees that this size conversion is valid. */
3372 create_convert_operand_to (&ops
[1], size
, mode
, true);
3373 create_convert_operand_from (&ops
[2], val
, byte_mode
, true);
3374 create_integer_operand (&ops
[3], align
/ BITS_PER_UNIT
);
3377 create_integer_operand (&ops
[4], expected_align
/ BITS_PER_UNIT
);
3378 create_integer_operand (&ops
[5], expected_size
);
3382 create_integer_operand (&ops
[6], min_size
);
3383 /* If we cannot represent the maximal size,
3384 make parameter NULL. */
3385 if ((HOST_WIDE_INT
) max_size
!= -1)
3386 create_integer_operand (&ops
[7], max_size
);
3388 create_fixed_operand (&ops
[7], NULL
);
3392 /* If we cannot represent the maximal size,
3393 make parameter NULL. */
3394 if ((HOST_WIDE_INT
) probable_max_size
!= -1)
3395 create_integer_operand (&ops
[8], probable_max_size
);
3397 create_fixed_operand (&ops
[8], NULL
);
3399 if (maybe_expand_insn (code
, nops
, ops
))
3408 /* Write to one of the components of the complex value CPLX. Write VAL to
3409 the real part if IMAG_P is false, and the imaginary part if its true. */
3412 write_complex_part (rtx cplx
, rtx val
, bool imag_p
)
3418 if (GET_CODE (cplx
) == CONCAT
)
3420 emit_move_insn (XEXP (cplx
, imag_p
), val
);
3424 cmode
= GET_MODE (cplx
);
3425 imode
= GET_MODE_INNER (cmode
);
3426 ibitsize
= GET_MODE_BITSIZE (imode
);
3428 /* For MEMs simplify_gen_subreg may generate an invalid new address
3429 because, e.g., the original address is considered mode-dependent
3430 by the target, which restricts simplify_subreg from invoking
3431 adjust_address_nv. Instead of preparing fallback support for an
3432 invalid address, we call adjust_address_nv directly. */
3435 emit_move_insn (adjust_address_nv (cplx
, imode
,
3436 imag_p
? GET_MODE_SIZE (imode
) : 0),
3441 /* If the sub-object is at least word sized, then we know that subregging
3442 will work. This special case is important, since store_bit_field
3443 wants to operate on integer modes, and there's rarely an OImode to
3444 correspond to TCmode. */
3445 if (ibitsize
>= BITS_PER_WORD
3446 /* For hard regs we have exact predicates. Assume we can split
3447 the original object if it spans an even number of hard regs.
3448 This special case is important for SCmode on 64-bit platforms
3449 where the natural size of floating-point regs is 32-bit. */
3451 && REGNO (cplx
) < FIRST_PSEUDO_REGISTER
3452 && REG_NREGS (cplx
) % 2 == 0))
3454 rtx part
= simplify_gen_subreg (imode
, cplx
, cmode
,
3455 imag_p
? GET_MODE_SIZE (imode
) : 0);
3458 emit_move_insn (part
, val
);
3462 /* simplify_gen_subreg may fail for sub-word MEMs. */
3463 gcc_assert (MEM_P (cplx
) && ibitsize
< BITS_PER_WORD
);
3466 store_bit_field (cplx
, ibitsize
, imag_p
? ibitsize
: 0, 0, 0, imode
, val
,
3470 /* Extract one of the components of the complex value CPLX. Extract the
3471 real part if IMAG_P is false, and the imaginary part if it's true. */
3474 read_complex_part (rtx cplx
, bool imag_p
)
3480 if (GET_CODE (cplx
) == CONCAT
)
3481 return XEXP (cplx
, imag_p
);
3483 cmode
= GET_MODE (cplx
);
3484 imode
= GET_MODE_INNER (cmode
);
3485 ibitsize
= GET_MODE_BITSIZE (imode
);
3487 /* Special case reads from complex constants that got spilled to memory. */
3488 if (MEM_P (cplx
) && GET_CODE (XEXP (cplx
, 0)) == SYMBOL_REF
)
3490 tree decl
= SYMBOL_REF_DECL (XEXP (cplx
, 0));
3491 if (decl
&& TREE_CODE (decl
) == COMPLEX_CST
)
3493 tree part
= imag_p
? TREE_IMAGPART (decl
) : TREE_REALPART (decl
);
3494 if (CONSTANT_CLASS_P (part
))
3495 return expand_expr (part
, NULL_RTX
, imode
, EXPAND_NORMAL
);
3499 /* For MEMs simplify_gen_subreg may generate an invalid new address
3500 because, e.g., the original address is considered mode-dependent
3501 by the target, which restricts simplify_subreg from invoking
3502 adjust_address_nv. Instead of preparing fallback support for an
3503 invalid address, we call adjust_address_nv directly. */
3505 return adjust_address_nv (cplx
, imode
,
3506 imag_p
? GET_MODE_SIZE (imode
) : 0);
3508 /* If the sub-object is at least word sized, then we know that subregging
3509 will work. This special case is important, since extract_bit_field
3510 wants to operate on integer modes, and there's rarely an OImode to
3511 correspond to TCmode. */
3512 if (ibitsize
>= BITS_PER_WORD
3513 /* For hard regs we have exact predicates. Assume we can split
3514 the original object if it spans an even number of hard regs.
3515 This special case is important for SCmode on 64-bit platforms
3516 where the natural size of floating-point regs is 32-bit. */
3518 && REGNO (cplx
) < FIRST_PSEUDO_REGISTER
3519 && REG_NREGS (cplx
) % 2 == 0))
3521 rtx ret
= simplify_gen_subreg (imode
, cplx
, cmode
,
3522 imag_p
? GET_MODE_SIZE (imode
) : 0);
3526 /* simplify_gen_subreg may fail for sub-word MEMs. */
3527 gcc_assert (MEM_P (cplx
) && ibitsize
< BITS_PER_WORD
);
3530 return extract_bit_field (cplx
, ibitsize
, imag_p
? ibitsize
: 0,
3531 true, NULL_RTX
, imode
, imode
, false, NULL
);
3534 /* A subroutine of emit_move_insn_1. Yet another lowpart generator.
3535 NEW_MODE and OLD_MODE are the same size. Return NULL if X cannot be
3536 represented in NEW_MODE. If FORCE is true, this will never happen, as
3537 we'll force-create a SUBREG if needed. */
3540 emit_move_change_mode (machine_mode new_mode
,
3541 machine_mode old_mode
, rtx x
, bool force
)
3545 if (push_operand (x
, GET_MODE (x
)))
3547 ret
= gen_rtx_MEM (new_mode
, XEXP (x
, 0));
3548 MEM_COPY_ATTRIBUTES (ret
, x
);
3552 /* We don't have to worry about changing the address since the
3553 size in bytes is supposed to be the same. */
3554 if (reload_in_progress
)
3556 /* Copy the MEM to change the mode and move any
3557 substitutions from the old MEM to the new one. */
3558 ret
= adjust_address_nv (x
, new_mode
, 0);
3559 copy_replacements (x
, ret
);
3562 ret
= adjust_address (x
, new_mode
, 0);
3566 /* Note that we do want simplify_subreg's behavior of validating
3567 that the new mode is ok for a hard register. If we were to use
3568 simplify_gen_subreg, we would create the subreg, but would
3569 probably run into the target not being able to implement it. */
3570 /* Except, of course, when FORCE is true, when this is exactly what
3571 we want. Which is needed for CCmodes on some targets. */
3573 ret
= simplify_gen_subreg (new_mode
, x
, old_mode
, 0);
3575 ret
= simplify_subreg (new_mode
, x
, old_mode
, 0);
3581 /* A subroutine of emit_move_insn_1. Generate a move from Y into X using
3582 an integer mode of the same size as MODE. Returns the instruction
3583 emitted, or NULL if such a move could not be generated. */
3586 emit_move_via_integer (machine_mode mode
, rtx x
, rtx y
, bool force
)
3588 scalar_int_mode imode
;
3589 enum insn_code code
;
3591 /* There must exist a mode of the exact size we require. */
3592 if (!int_mode_for_mode (mode
).exists (&imode
))
3595 /* The target must support moves in this mode. */
3596 code
= optab_handler (mov_optab
, imode
);
3597 if (code
== CODE_FOR_nothing
)
3600 x
= emit_move_change_mode (imode
, mode
, x
, force
);
3603 y
= emit_move_change_mode (imode
, mode
, y
, force
);
3606 return emit_insn (GEN_FCN (code
) (x
, y
));
3609 /* A subroutine of emit_move_insn_1. X is a push_operand in MODE.
3610 Return an equivalent MEM that does not use an auto-increment. */
3613 emit_move_resolve_push (machine_mode mode
, rtx x
)
3615 enum rtx_code code
= GET_CODE (XEXP (x
, 0));
3618 poly_int64 adjust
= GET_MODE_SIZE (mode
);
3619 #ifdef PUSH_ROUNDING
3620 adjust
= PUSH_ROUNDING (adjust
);
3622 if (code
== PRE_DEC
|| code
== POST_DEC
)
3624 else if (code
== PRE_MODIFY
|| code
== POST_MODIFY
)
3626 rtx expr
= XEXP (XEXP (x
, 0), 1);
3628 gcc_assert (GET_CODE (expr
) == PLUS
|| GET_CODE (expr
) == MINUS
);
3629 poly_int64 val
= rtx_to_poly_int64 (XEXP (expr
, 1));
3630 if (GET_CODE (expr
) == MINUS
)
3632 gcc_assert (known_eq (adjust
, val
) || known_eq (adjust
, -val
));
3636 /* Do not use anti_adjust_stack, since we don't want to update
3637 stack_pointer_delta. */
3638 temp
= expand_simple_binop (Pmode
, PLUS
, stack_pointer_rtx
,
3639 gen_int_mode (adjust
, Pmode
), stack_pointer_rtx
,
3640 0, OPTAB_LIB_WIDEN
);
3641 if (temp
!= stack_pointer_rtx
)
3642 emit_move_insn (stack_pointer_rtx
, temp
);
3649 temp
= stack_pointer_rtx
;
3654 temp
= plus_constant (Pmode
, stack_pointer_rtx
, -adjust
);
3660 return replace_equiv_address (x
, temp
);
3663 /* A subroutine of emit_move_complex. Generate a move from Y into X.
3664 X is known to satisfy push_operand, and MODE is known to be complex.
3665 Returns the last instruction emitted. */
3668 emit_move_complex_push (machine_mode mode
, rtx x
, rtx y
)
3670 scalar_mode submode
= GET_MODE_INNER (mode
);
3673 #ifdef PUSH_ROUNDING
3674 poly_int64 submodesize
= GET_MODE_SIZE (submode
);
3676 /* In case we output to the stack, but the size is smaller than the
3677 machine can push exactly, we need to use move instructions. */
3678 if (maybe_ne (PUSH_ROUNDING (submodesize
), submodesize
))
3680 x
= emit_move_resolve_push (mode
, x
);
3681 return emit_move_insn (x
, y
);
3685 /* Note that the real part always precedes the imag part in memory
3686 regardless of machine's endianness. */
3687 switch (GET_CODE (XEXP (x
, 0)))
3701 emit_move_insn (gen_rtx_MEM (submode
, XEXP (x
, 0)),
3702 read_complex_part (y
, imag_first
));
3703 return emit_move_insn (gen_rtx_MEM (submode
, XEXP (x
, 0)),
3704 read_complex_part (y
, !imag_first
));
3707 /* A subroutine of emit_move_complex. Perform the move from Y to X
3708 via two moves of the parts. Returns the last instruction emitted. */
3711 emit_move_complex_parts (rtx x
, rtx y
)
3713 /* Show the output dies here. This is necessary for SUBREGs
3714 of pseudos since we cannot track their lifetimes correctly;
3715 hard regs shouldn't appear here except as return values. */
3716 if (!reload_completed
&& !reload_in_progress
3717 && REG_P (x
) && !reg_overlap_mentioned_p (x
, y
))
3720 write_complex_part (x
, read_complex_part (y
, false), false);
3721 write_complex_part (x
, read_complex_part (y
, true), true);
3723 return get_last_insn ();
3726 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
3727 MODE is known to be complex. Returns the last instruction emitted. */
3730 emit_move_complex (machine_mode mode
, rtx x
, rtx y
)
3734 /* Need to take special care for pushes, to maintain proper ordering
3735 of the data, and possibly extra padding. */
3736 if (push_operand (x
, mode
))
3737 return emit_move_complex_push (mode
, x
, y
);
3739 /* See if we can coerce the target into moving both values at once, except
3740 for floating point where we favor moving as parts if this is easy. */
3741 if (GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
3742 && optab_handler (mov_optab
, GET_MODE_INNER (mode
)) != CODE_FOR_nothing
3744 && HARD_REGISTER_P (x
)
3745 && REG_NREGS (x
) == 1)
3747 && HARD_REGISTER_P (y
)
3748 && REG_NREGS (y
) == 1))
3750 /* Not possible if the values are inherently not adjacent. */
3751 else if (GET_CODE (x
) == CONCAT
|| GET_CODE (y
) == CONCAT
)
3753 /* Is possible if both are registers (or subregs of registers). */
3754 else if (register_operand (x
, mode
) && register_operand (y
, mode
))
3756 /* If one of the operands is a memory, and alignment constraints
3757 are friendly enough, we may be able to do combined memory operations.
3758 We do not attempt this if Y is a constant because that combination is
3759 usually better with the by-parts thing below. */
3760 else if ((MEM_P (x
) ? !CONSTANT_P (y
) : MEM_P (y
))
3761 && (!STRICT_ALIGNMENT
3762 || get_mode_alignment (mode
) == BIGGEST_ALIGNMENT
))
3771 /* For memory to memory moves, optimal behavior can be had with the
3772 existing block move logic. But use normal expansion if optimizing
3774 if (MEM_P (x
) && MEM_P (y
))
3776 emit_block_move (x
, y
, gen_int_mode (GET_MODE_SIZE (mode
), Pmode
),
3777 (optimize_insn_for_speed_p()
3778 ? BLOCK_OP_NO_LIBCALL
: BLOCK_OP_NORMAL
));
3779 return get_last_insn ();
3782 ret
= emit_move_via_integer (mode
, x
, y
, true);
3787 return emit_move_complex_parts (x
, y
);
3790 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
3791 MODE is known to be MODE_CC. Returns the last instruction emitted. */
3794 emit_move_ccmode (machine_mode mode
, rtx x
, rtx y
)
3798 /* Assume all MODE_CC modes are equivalent; if we have movcc, use it. */
3801 enum insn_code code
= optab_handler (mov_optab
, CCmode
);
3802 if (code
!= CODE_FOR_nothing
)
3804 x
= emit_move_change_mode (CCmode
, mode
, x
, true);
3805 y
= emit_move_change_mode (CCmode
, mode
, y
, true);
3806 return emit_insn (GEN_FCN (code
) (x
, y
));
3810 /* Otherwise, find the MODE_INT mode of the same width. */
3811 ret
= emit_move_via_integer (mode
, x
, y
, false);
3812 gcc_assert (ret
!= NULL
);
3816 /* Return true if word I of OP lies entirely in the
3817 undefined bits of a paradoxical subreg. */
3820 undefined_operand_subword_p (const_rtx op
, int i
)
3822 if (GET_CODE (op
) != SUBREG
)
3824 machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
3825 poly_int64 offset
= i
* UNITS_PER_WORD
+ subreg_memory_offset (op
);
3826 return (known_ge (offset
, GET_MODE_SIZE (innermostmode
))
3827 || known_le (offset
, -UNITS_PER_WORD
));
3830 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
3831 MODE is any multi-word or full-word mode that lacks a move_insn
3832 pattern. Note that you will get better code if you define such
3833 patterns, even if they must turn into multiple assembler instructions. */
3836 emit_move_multi_word (machine_mode mode
, rtx x
, rtx y
)
3838 rtx_insn
*last_insn
= 0;
3844 /* This function can only handle cases where the number of words is
3845 known at compile time. */
3846 mode_size
= GET_MODE_SIZE (mode
).to_constant ();
3847 gcc_assert (mode_size
>= UNITS_PER_WORD
);
3849 /* If X is a push on the stack, do the push now and replace
3850 X with a reference to the stack pointer. */
3851 if (push_operand (x
, mode
))
3852 x
= emit_move_resolve_push (mode
, x
);
3854 /* If we are in reload, see if either operand is a MEM whose address
3855 is scheduled for replacement. */
3856 if (reload_in_progress
&& MEM_P (x
)
3857 && (inner
= find_replacement (&XEXP (x
, 0))) != XEXP (x
, 0))
3858 x
= replace_equiv_address_nv (x
, inner
);
3859 if (reload_in_progress
&& MEM_P (y
)
3860 && (inner
= find_replacement (&XEXP (y
, 0))) != XEXP (y
, 0))
3861 y
= replace_equiv_address_nv (y
, inner
);
3865 need_clobber
= false;
3866 for (i
= 0; i
< CEIL (mode_size
, UNITS_PER_WORD
); i
++)
3868 /* Do not generate code for a move if it would go entirely
3869 to the non-existing bits of a paradoxical subreg. */
3870 if (undefined_operand_subword_p (x
, i
))
3873 rtx xpart
= operand_subword (x
, i
, 1, mode
);
3876 /* Do not generate code for a move if it would come entirely
3877 from the undefined bits of a paradoxical subreg. */
3878 if (undefined_operand_subword_p (y
, i
))
3881 ypart
= operand_subword (y
, i
, 1, mode
);
3883 /* If we can't get a part of Y, put Y into memory if it is a
3884 constant. Otherwise, force it into a register. Then we must
3885 be able to get a part of Y. */
3886 if (ypart
== 0 && CONSTANT_P (y
))
3888 y
= use_anchored_address (force_const_mem (mode
, y
));
3889 ypart
= operand_subword (y
, i
, 1, mode
);
3891 else if (ypart
== 0)
3892 ypart
= operand_subword_force (y
, i
, mode
);
3894 gcc_assert (xpart
&& ypart
);
3896 need_clobber
|= (GET_CODE (xpart
) == SUBREG
);
3898 last_insn
= emit_move_insn (xpart
, ypart
);
3904 /* Show the output dies here. This is necessary for SUBREGs
3905 of pseudos since we cannot track their lifetimes correctly;
3906 hard regs shouldn't appear here except as return values.
3907 We never want to emit such a clobber after reload. */
3909 && ! (reload_in_progress
|| reload_completed
)
3910 && need_clobber
!= 0)
3918 /* Low level part of emit_move_insn.
3919 Called just like emit_move_insn, but assumes X and Y
3920 are basically valid. */
3923 emit_move_insn_1 (rtx x
, rtx y
)
3925 machine_mode mode
= GET_MODE (x
);
3926 enum insn_code code
;
3928 gcc_assert ((unsigned int) mode
< (unsigned int) MAX_MACHINE_MODE
);
3930 code
= optab_handler (mov_optab
, mode
);
3931 if (code
!= CODE_FOR_nothing
)
3932 return emit_insn (GEN_FCN (code
) (x
, y
));
3934 /* Expand complex moves by moving real part and imag part. */
3935 if (COMPLEX_MODE_P (mode
))
3936 return emit_move_complex (mode
, x
, y
);
3938 if (GET_MODE_CLASS (mode
) == MODE_DECIMAL_FLOAT
3939 || ALL_FIXED_POINT_MODE_P (mode
))
3941 rtx_insn
*result
= emit_move_via_integer (mode
, x
, y
, true);
3943 /* If we can't find an integer mode, use multi words. */
3947 return emit_move_multi_word (mode
, x
, y
);
3950 if (GET_MODE_CLASS (mode
) == MODE_CC
)
3951 return emit_move_ccmode (mode
, x
, y
);
3953 /* Try using a move pattern for the corresponding integer mode. This is
3954 only safe when simplify_subreg can convert MODE constants into integer
3955 constants. At present, it can only do this reliably if the value
3956 fits within a HOST_WIDE_INT. */
3958 || known_le (GET_MODE_BITSIZE (mode
), HOST_BITS_PER_WIDE_INT
))
3960 rtx_insn
*ret
= emit_move_via_integer (mode
, x
, y
, lra_in_progress
);
3964 if (! lra_in_progress
|| recog (PATTERN (ret
), ret
, 0) >= 0)
3969 return emit_move_multi_word (mode
, x
, y
);
3972 /* Generate code to copy Y into X.
3973 Both Y and X must have the same mode, except that
3974 Y can be a constant with VOIDmode.
3975 This mode cannot be BLKmode; use emit_block_move for that.
3977 Return the last instruction emitted. */
3980 emit_move_insn (rtx x
, rtx y
)
3982 machine_mode mode
= GET_MODE (x
);
3983 rtx y_cst
= NULL_RTX
;
3984 rtx_insn
*last_insn
;
3987 gcc_assert (mode
!= BLKmode
3988 && (GET_MODE (y
) == mode
|| GET_MODE (y
) == VOIDmode
));
3990 /* If we have a copy that looks like one of the following patterns:
3991 (set (subreg:M1 (reg:M2 ...)) (subreg:M1 (reg:M2 ...)))
3992 (set (subreg:M1 (reg:M2 ...)) (mem:M1 ADDR))
3993 (set (mem:M1 ADDR) (subreg:M1 (reg:M2 ...)))
3994 (set (subreg:M1 (reg:M2 ...)) (constant C))
3995 where mode M1 is equal in size to M2, try to detect whether the
3996 mode change involves an implicit round trip through memory.
3997 If so, see if we can avoid that by removing the subregs and
3998 doing the move in mode M2 instead. */
4000 rtx x_inner
= NULL_RTX
;
4001 rtx y_inner
= NULL_RTX
;
4003 auto candidate_subreg_p
= [&](rtx subreg
) {
4004 return (REG_P (SUBREG_REG (subreg
))
4005 && known_eq (GET_MODE_SIZE (GET_MODE (SUBREG_REG (subreg
))),
4006 GET_MODE_SIZE (GET_MODE (subreg
)))
4007 && optab_handler (mov_optab
, GET_MODE (SUBREG_REG (subreg
)))
4008 != CODE_FOR_nothing
);
4011 auto candidate_mem_p
= [&](machine_mode innermode
, rtx mem
) {
4012 return (!targetm
.can_change_mode_class (innermode
, GET_MODE (mem
), ALL_REGS
)
4013 && !push_operand (mem
, GET_MODE (mem
))
4014 /* Not a candiate if innermode requires too much alignment. */
4015 && (MEM_ALIGN (mem
) >= GET_MODE_ALIGNMENT (innermode
)
4016 || targetm
.slow_unaligned_access (GET_MODE (mem
),
4018 || !targetm
.slow_unaligned_access (innermode
,
4022 if (SUBREG_P (x
) && candidate_subreg_p (x
))
4023 x_inner
= SUBREG_REG (x
);
4025 if (SUBREG_P (y
) && candidate_subreg_p (y
))
4026 y_inner
= SUBREG_REG (y
);
4028 if (x_inner
!= NULL_RTX
4029 && y_inner
!= NULL_RTX
4030 && GET_MODE (x_inner
) == GET_MODE (y_inner
)
4031 && !targetm
.can_change_mode_class (GET_MODE (x_inner
), mode
, ALL_REGS
))
4035 mode
= GET_MODE (x_inner
);
4037 else if (x_inner
!= NULL_RTX
4039 && candidate_mem_p (GET_MODE (x_inner
), y
))
4042 y
= adjust_address (y
, GET_MODE (x_inner
), 0);
4043 mode
= GET_MODE (x_inner
);
4045 else if (y_inner
!= NULL_RTX
4047 && candidate_mem_p (GET_MODE (y_inner
), x
))
4049 x
= adjust_address (x
, GET_MODE (y_inner
), 0);
4051 mode
= GET_MODE (y_inner
);
4053 else if (x_inner
!= NULL_RTX
4055 && !targetm
.can_change_mode_class (GET_MODE (x_inner
),
4057 && (y_inner
= simplify_subreg (GET_MODE (x_inner
), y
, mode
, 0)))
4061 mode
= GET_MODE (x_inner
);
4067 && SCALAR_FLOAT_MODE_P (GET_MODE (x
))
4068 && (last_insn
= compress_float_constant (x
, y
)))
4073 if (!targetm
.legitimate_constant_p (mode
, y
))
4075 y
= force_const_mem (mode
, y
);
4077 /* If the target's cannot_force_const_mem prevented the spill,
4078 assume that the target's move expanders will also take care
4079 of the non-legitimate constant. */
4083 y
= use_anchored_address (y
);
4087 /* If X or Y are memory references, verify that their addresses are valid
4090 && (! memory_address_addr_space_p (GET_MODE (x
), XEXP (x
, 0),
4092 && ! push_operand (x
, GET_MODE (x
))))
4093 x
= validize_mem (x
);
4096 && ! memory_address_addr_space_p (GET_MODE (y
), XEXP (y
, 0),
4097 MEM_ADDR_SPACE (y
)))
4098 y
= validize_mem (y
);
4100 gcc_assert (mode
!= BLKmode
);
4102 last_insn
= emit_move_insn_1 (x
, y
);
4104 if (y_cst
&& REG_P (x
)
4105 && (set
= single_set (last_insn
)) != NULL_RTX
4106 && SET_DEST (set
) == x
4107 && ! rtx_equal_p (y_cst
, SET_SRC (set
)))
4108 set_unique_reg_note (last_insn
, REG_EQUAL
, copy_rtx (y_cst
));
4113 /* Generate the body of an instruction to copy Y into X.
4114 It may be a list of insns, if one insn isn't enough. */
4117 gen_move_insn (rtx x
, rtx y
)
4122 emit_move_insn_1 (x
, y
);
4128 /* If Y is representable exactly in a narrower mode, and the target can
4129 perform the extension directly from constant or memory, then emit the
4130 move as an extension. */
4133 compress_float_constant (rtx x
, rtx y
)
4135 machine_mode dstmode
= GET_MODE (x
);
4136 machine_mode orig_srcmode
= GET_MODE (y
);
4137 machine_mode srcmode
;
4138 const REAL_VALUE_TYPE
*r
;
4139 int oldcost
, newcost
;
4140 bool speed
= optimize_insn_for_speed_p ();
4142 r
= CONST_DOUBLE_REAL_VALUE (y
);
4144 if (targetm
.legitimate_constant_p (dstmode
, y
))
4145 oldcost
= set_src_cost (y
, orig_srcmode
, speed
);
4147 oldcost
= set_src_cost (force_const_mem (dstmode
, y
), dstmode
, speed
);
4149 FOR_EACH_MODE_UNTIL (srcmode
, orig_srcmode
)
4153 rtx_insn
*last_insn
;
4155 /* Skip if the target can't extend this way. */
4156 ic
= can_extend_p (dstmode
, srcmode
, 0);
4157 if (ic
== CODE_FOR_nothing
)
4160 /* Skip if the narrowed value isn't exact. */
4161 if (! exact_real_truncate (srcmode
, r
))
4164 trunc_y
= const_double_from_real_value (*r
, srcmode
);
4166 if (targetm
.legitimate_constant_p (srcmode
, trunc_y
))
4168 /* Skip if the target needs extra instructions to perform
4170 if (!insn_operand_matches (ic
, 1, trunc_y
))
4172 /* This is valid, but may not be cheaper than the original. */
4173 newcost
= set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode
, trunc_y
),
4175 if (oldcost
< newcost
)
4178 else if (float_extend_from_mem
[dstmode
][srcmode
])
4180 trunc_y
= force_const_mem (srcmode
, trunc_y
);
4181 /* This is valid, but may not be cheaper than the original. */
4182 newcost
= set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode
, trunc_y
),
4184 if (oldcost
< newcost
)
4186 trunc_y
= validize_mem (trunc_y
);
4191 /* For CSE's benefit, force the compressed constant pool entry
4192 into a new pseudo. This constant may be used in different modes,
4193 and if not, combine will put things back together for us. */
4194 trunc_y
= force_reg (srcmode
, trunc_y
);
4196 /* If x is a hard register, perform the extension into a pseudo,
4197 so that e.g. stack realignment code is aware of it. */
4199 if (REG_P (x
) && HARD_REGISTER_P (x
))
4200 target
= gen_reg_rtx (dstmode
);
4202 emit_unop_insn (ic
, target
, trunc_y
, UNKNOWN
);
4203 last_insn
= get_last_insn ();
4206 set_unique_reg_note (last_insn
, REG_EQUAL
, y
);
4209 return emit_move_insn (x
, target
);
4216 /* Pushing data onto the stack. */
4218 /* Push a block of length SIZE (perhaps variable)
4219 and return an rtx to address the beginning of the block.
4220 The value may be virtual_outgoing_args_rtx.
4222 EXTRA is the number of bytes of padding to push in addition to SIZE.
4223 BELOW nonzero means this padding comes at low addresses;
4224 otherwise, the padding comes at high addresses. */
4227 push_block (rtx size
, poly_int64 extra
, int below
)
4231 size
= convert_modes (Pmode
, ptr_mode
, size
, 1);
4232 if (CONSTANT_P (size
))
4233 anti_adjust_stack (plus_constant (Pmode
, size
, extra
));
4234 else if (REG_P (size
) && known_eq (extra
, 0))
4235 anti_adjust_stack (size
);
4238 temp
= copy_to_mode_reg (Pmode
, size
);
4239 if (maybe_ne (extra
, 0))
4240 temp
= expand_binop (Pmode
, add_optab
, temp
,
4241 gen_int_mode (extra
, Pmode
),
4242 temp
, 0, OPTAB_LIB_WIDEN
);
4243 anti_adjust_stack (temp
);
4246 if (STACK_GROWS_DOWNWARD
)
4248 temp
= virtual_outgoing_args_rtx
;
4249 if (maybe_ne (extra
, 0) && below
)
4250 temp
= plus_constant (Pmode
, temp
, extra
);
4255 if (poly_int_rtx_p (size
, &csize
))
4256 temp
= plus_constant (Pmode
, virtual_outgoing_args_rtx
,
4257 -csize
- (below
? 0 : extra
));
4258 else if (maybe_ne (extra
, 0) && !below
)
4259 temp
= gen_rtx_PLUS (Pmode
, virtual_outgoing_args_rtx
,
4260 negate_rtx (Pmode
, plus_constant (Pmode
, size
,
4263 temp
= gen_rtx_PLUS (Pmode
, virtual_outgoing_args_rtx
,
4264 negate_rtx (Pmode
, size
));
4267 return memory_address (NARROWEST_INT_MODE
, temp
);
4270 /* A utility routine that returns the base of an auto-inc memory, or NULL. */
4273 mem_autoinc_base (rtx mem
)
4277 rtx addr
= XEXP (mem
, 0);
4278 if (GET_RTX_CLASS (GET_CODE (addr
)) == RTX_AUTOINC
)
4279 return XEXP (addr
, 0);
4284 /* A utility routine used here, in reload, and in try_split. The insns
4285 after PREV up to and including LAST are known to adjust the stack,
4286 with a final value of END_ARGS_SIZE. Iterate backward from LAST
4287 placing notes as appropriate. PREV may be NULL, indicating the
4288 entire insn sequence prior to LAST should be scanned.
4290 The set of allowed stack pointer modifications is small:
4291 (1) One or more auto-inc style memory references (aka pushes),
4292 (2) One or more addition/subtraction with the SP as destination,
4293 (3) A single move insn with the SP as destination,
4294 (4) A call_pop insn,
4295 (5) Noreturn call insns if !ACCUMULATE_OUTGOING_ARGS.
4297 Insns in the sequence that do not modify the SP are ignored,
4298 except for noreturn calls.
4300 The return value is the amount of adjustment that can be trivially
4301 verified, via immediate operand or auto-inc. If the adjustment
4302 cannot be trivially extracted, the return value is HOST_WIDE_INT_MIN. */
4305 find_args_size_adjust (rtx_insn
*insn
)
4310 pat
= PATTERN (insn
);
4313 /* Look for a call_pop pattern. */
4316 /* We have to allow non-call_pop patterns for the case
4317 of emit_single_push_insn of a TLS address. */
4318 if (GET_CODE (pat
) != PARALLEL
)
4321 /* All call_pop have a stack pointer adjust in the parallel.
4322 The call itself is always first, and the stack adjust is
4323 usually last, so search from the end. */
4324 for (i
= XVECLEN (pat
, 0) - 1; i
> 0; --i
)
4326 set
= XVECEXP (pat
, 0, i
);
4327 if (GET_CODE (set
) != SET
)
4329 dest
= SET_DEST (set
);
4330 if (dest
== stack_pointer_rtx
)
4333 /* We'd better have found the stack pointer adjust. */
4336 /* Fall through to process the extracted SET and DEST
4337 as if it was a standalone insn. */
4339 else if (GET_CODE (pat
) == SET
)
4341 else if ((set
= single_set (insn
)) != NULL
)
4343 else if (GET_CODE (pat
) == PARALLEL
)
4345 /* ??? Some older ports use a parallel with a stack adjust
4346 and a store for a PUSH_ROUNDING pattern, rather than a
4347 PRE/POST_MODIFY rtx. Don't force them to update yet... */
4348 /* ??? See h8300 and m68k, pushqi1. */
4349 for (i
= XVECLEN (pat
, 0) - 1; i
>= 0; --i
)
4351 set
= XVECEXP (pat
, 0, i
);
4352 if (GET_CODE (set
) != SET
)
4354 dest
= SET_DEST (set
);
4355 if (dest
== stack_pointer_rtx
)
4358 /* We do not expect an auto-inc of the sp in the parallel. */
4359 gcc_checking_assert (mem_autoinc_base (dest
) != stack_pointer_rtx
);
4360 gcc_checking_assert (mem_autoinc_base (SET_SRC (set
))
4361 != stack_pointer_rtx
);
4369 dest
= SET_DEST (set
);
4371 /* Look for direct modifications of the stack pointer. */
4372 if (REG_P (dest
) && REGNO (dest
) == STACK_POINTER_REGNUM
)
4374 /* Look for a trivial adjustment, otherwise assume nothing. */
4375 /* Note that the SPU restore_stack_block pattern refers to
4376 the stack pointer in V4SImode. Consider that non-trivial. */
4378 if (SCALAR_INT_MODE_P (GET_MODE (dest
))
4379 && strip_offset (SET_SRC (set
), &offset
) == stack_pointer_rtx
)
4381 /* ??? Reload can generate no-op moves, which will be cleaned
4382 up later. Recognize it and continue searching. */
4383 else if (rtx_equal_p (dest
, SET_SRC (set
)))
4386 return HOST_WIDE_INT_MIN
;
4392 /* Otherwise only think about autoinc patterns. */
4393 if (mem_autoinc_base (dest
) == stack_pointer_rtx
)
4396 gcc_checking_assert (mem_autoinc_base (SET_SRC (set
))
4397 != stack_pointer_rtx
);
4399 else if (mem_autoinc_base (SET_SRC (set
)) == stack_pointer_rtx
)
4400 mem
= SET_SRC (set
);
4404 addr
= XEXP (mem
, 0);
4405 switch (GET_CODE (addr
))
4409 return GET_MODE_SIZE (GET_MODE (mem
));
4412 return -GET_MODE_SIZE (GET_MODE (mem
));
4415 addr
= XEXP (addr
, 1);
4416 gcc_assert (GET_CODE (addr
) == PLUS
);
4417 gcc_assert (XEXP (addr
, 0) == stack_pointer_rtx
);
4418 return rtx_to_poly_int64 (XEXP (addr
, 1));
4426 fixup_args_size_notes (rtx_insn
*prev
, rtx_insn
*last
,
4427 poly_int64 end_args_size
)
4429 poly_int64 args_size
= end_args_size
;
4430 bool saw_unknown
= false;
4433 for (insn
= last
; insn
!= prev
; insn
= PREV_INSN (insn
))
4435 if (!NONDEBUG_INSN_P (insn
))
4438 /* We might have existing REG_ARGS_SIZE notes, e.g. when pushing
4439 a call argument containing a TLS address that itself requires
4440 a call to __tls_get_addr. The handling of stack_pointer_delta
4441 in emit_single_push_insn is supposed to ensure that any such
4442 notes are already correct. */
4443 rtx note
= find_reg_note (insn
, REG_ARGS_SIZE
, NULL_RTX
);
4444 gcc_assert (!note
|| known_eq (args_size
, get_args_size (note
)));
4446 poly_int64 this_delta
= find_args_size_adjust (insn
);
4447 if (known_eq (this_delta
, 0))
4450 || ACCUMULATE_OUTGOING_ARGS
4451 || find_reg_note (insn
, REG_NORETURN
, NULL_RTX
) == NULL_RTX
)
4455 gcc_assert (!saw_unknown
);
4456 if (known_eq (this_delta
, HOST_WIDE_INT_MIN
))
4460 add_args_size_note (insn
, args_size
);
4461 if (STACK_GROWS_DOWNWARD
)
4462 this_delta
= -poly_uint64 (this_delta
);
4465 args_size
= HOST_WIDE_INT_MIN
;
4467 args_size
-= this_delta
;
4473 #ifdef PUSH_ROUNDING
4474 /* Emit single push insn. */
4477 emit_single_push_insn_1 (machine_mode mode
, rtx x
, tree type
)
4480 poly_int64 rounded_size
= PUSH_ROUNDING (GET_MODE_SIZE (mode
));
4482 enum insn_code icode
;
4484 /* If there is push pattern, use it. Otherwise try old way of throwing
4485 MEM representing push operation to move expander. */
4486 icode
= optab_handler (push_optab
, mode
);
4487 if (icode
!= CODE_FOR_nothing
)
4489 class expand_operand ops
[1];
4491 create_input_operand (&ops
[0], x
, mode
);
4492 if (maybe_expand_insn (icode
, 1, ops
))
4495 if (known_eq (GET_MODE_SIZE (mode
), rounded_size
))
4496 dest_addr
= gen_rtx_fmt_e (STACK_PUSH_CODE
, Pmode
, stack_pointer_rtx
);
4497 /* If we are to pad downward, adjust the stack pointer first and
4498 then store X into the stack location using an offset. This is
4499 because emit_move_insn does not know how to pad; it does not have
4501 else if (targetm
.calls
.function_arg_padding (mode
, type
) == PAD_DOWNWARD
)
4503 emit_move_insn (stack_pointer_rtx
,
4504 expand_binop (Pmode
,
4505 STACK_GROWS_DOWNWARD
? sub_optab
4508 gen_int_mode (rounded_size
, Pmode
),
4509 NULL_RTX
, 0, OPTAB_LIB_WIDEN
));
4511 poly_int64 offset
= rounded_size
- GET_MODE_SIZE (mode
);
4512 if (STACK_GROWS_DOWNWARD
&& STACK_PUSH_CODE
== POST_DEC
)
4513 /* We have already decremented the stack pointer, so get the
4515 offset
+= rounded_size
;
4517 if (!STACK_GROWS_DOWNWARD
&& STACK_PUSH_CODE
== POST_INC
)
4518 /* We have already incremented the stack pointer, so get the
4520 offset
-= rounded_size
;
4522 dest_addr
= plus_constant (Pmode
, stack_pointer_rtx
, offset
);
4526 if (STACK_GROWS_DOWNWARD
)
4527 /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */
4528 dest_addr
= plus_constant (Pmode
, stack_pointer_rtx
, -rounded_size
);
4530 /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */
4531 dest_addr
= plus_constant (Pmode
, stack_pointer_rtx
, rounded_size
);
4533 dest_addr
= gen_rtx_PRE_MODIFY (Pmode
, stack_pointer_rtx
, dest_addr
);
4536 dest
= gen_rtx_MEM (mode
, dest_addr
);
4540 set_mem_attributes (dest
, type
, 1);
4542 if (cfun
->tail_call_marked
)
4543 /* Function incoming arguments may overlap with sibling call
4544 outgoing arguments and we cannot allow reordering of reads
4545 from function arguments with stores to outgoing arguments
4546 of sibling calls. */
4547 set_mem_alias_set (dest
, 0);
4549 emit_move_insn (dest
, x
);
4552 /* Emit and annotate a single push insn. */
4555 emit_single_push_insn (machine_mode mode
, rtx x
, tree type
)
4557 poly_int64 delta
, old_delta
= stack_pointer_delta
;
4558 rtx_insn
*prev
= get_last_insn ();
4561 emit_single_push_insn_1 (mode
, x
, type
);
4563 /* Adjust stack_pointer_delta to describe the situation after the push
4564 we just performed. Note that we must do this after the push rather
4565 than before the push in case calculating X needs pushes and pops of
4566 its own (e.g. if calling __tls_get_addr). The REG_ARGS_SIZE notes
4567 for such pushes and pops must not include the effect of the future
4569 stack_pointer_delta
+= PUSH_ROUNDING (GET_MODE_SIZE (mode
));
4571 last
= get_last_insn ();
4573 /* Notice the common case where we emitted exactly one insn. */
4574 if (PREV_INSN (last
) == prev
)
4576 add_args_size_note (last
, stack_pointer_delta
);
4580 delta
= fixup_args_size_notes (prev
, last
, stack_pointer_delta
);
4581 gcc_assert (known_eq (delta
, HOST_WIDE_INT_MIN
)
4582 || known_eq (delta
, old_delta
));
4586 /* If reading SIZE bytes from X will end up reading from
4587 Y return the number of bytes that overlap. Return -1
4588 if there is no overlap or -2 if we can't determine
4589 (for example when X and Y have different base registers). */
4592 memory_load_overlap (rtx x
, rtx y
, HOST_WIDE_INT size
)
4594 rtx tmp
= plus_constant (Pmode
, x
, size
);
4595 rtx sub
= simplify_gen_binary (MINUS
, Pmode
, tmp
, y
);
4597 if (!CONST_INT_P (sub
))
4600 HOST_WIDE_INT val
= INTVAL (sub
);
4602 return IN_RANGE (val
, 1, size
) ? val
: -1;
4605 /* Generate code to push X onto the stack, assuming it has mode MODE and
4607 MODE is redundant except when X is a CONST_INT (since they don't
4609 SIZE is an rtx for the size of data to be copied (in bytes),
4610 needed only if X is BLKmode.
4611 Return true if successful. May return false if asked to push a
4612 partial argument during a sibcall optimization (as specified by
4613 SIBCALL_P) and the incoming and outgoing pointers cannot be shown
4616 ALIGN (in bits) is maximum alignment we can assume.
4618 If PARTIAL and REG are both nonzero, then copy that many of the first
4619 bytes of X into registers starting with REG, and push the rest of X.
4620 The amount of space pushed is decreased by PARTIAL bytes.
4621 REG must be a hard register in this case.
4622 If REG is zero but PARTIAL is not, take any all others actions for an
4623 argument partially in registers, but do not actually load any
4626 EXTRA is the amount in bytes of extra space to leave next to this arg.
4627 This is ignored if an argument block has already been allocated.
4629 On a machine that lacks real push insns, ARGS_ADDR is the address of
4630 the bottom of the argument block for this call. We use indexing off there
4631 to store the arg. On machines with push insns, ARGS_ADDR is 0 when a
4632 argument block has not been preallocated.
4634 ARGS_SO_FAR is the size of args previously pushed for this call.
4636 REG_PARM_STACK_SPACE is nonzero if functions require stack space
4637 for arguments passed in registers. If nonzero, it will be the number
4638 of bytes required. */
4641 emit_push_insn (rtx x
, machine_mode mode
, tree type
, rtx size
,
4642 unsigned int align
, int partial
, rtx reg
, poly_int64 extra
,
4643 rtx args_addr
, rtx args_so_far
, int reg_parm_stack_space
,
4644 rtx alignment_pad
, bool sibcall_p
)
4647 pad_direction stack_direction
4648 = STACK_GROWS_DOWNWARD
? PAD_DOWNWARD
: PAD_UPWARD
;
4650 /* Decide where to pad the argument: PAD_DOWNWARD for below,
4651 PAD_UPWARD for above, or PAD_NONE for don't pad it.
4652 Default is below for small data on big-endian machines; else above. */
4653 pad_direction where_pad
= targetm
.calls
.function_arg_padding (mode
, type
);
4655 /* Invert direction if stack is post-decrement.
4657 if (STACK_PUSH_CODE
== POST_DEC
)
4658 if (where_pad
!= PAD_NONE
)
4659 where_pad
= (where_pad
== PAD_DOWNWARD
? PAD_UPWARD
: PAD_DOWNWARD
);
4663 int nregs
= partial
/ UNITS_PER_WORD
;
4664 rtx
*tmp_regs
= NULL
;
4665 int overlapping
= 0;
4668 || (STRICT_ALIGNMENT
&& align
< GET_MODE_ALIGNMENT (mode
)))
4670 /* Copy a block into the stack, entirely or partially. */
4677 offset
= partial
% (PARM_BOUNDARY
/ BITS_PER_UNIT
);
4678 used
= partial
- offset
;
4680 if (mode
!= BLKmode
)
4682 /* A value is to be stored in an insufficiently aligned
4683 stack slot; copy via a suitably aligned slot if
4685 size
= gen_int_mode (GET_MODE_SIZE (mode
), Pmode
);
4686 if (!MEM_P (xinner
))
4688 temp
= assign_temp (type
, 1, 1);
4689 emit_move_insn (temp
, xinner
);
4696 /* USED is now the # of bytes we need not copy to the stack
4697 because registers will take care of them. */
4700 xinner
= adjust_address (xinner
, BLKmode
, used
);
4702 /* If the partial register-part of the arg counts in its stack size,
4703 skip the part of stack space corresponding to the registers.
4704 Otherwise, start copying to the beginning of the stack space,
4705 by setting SKIP to 0. */
4706 skip
= (reg_parm_stack_space
== 0) ? 0 : used
;
4708 #ifdef PUSH_ROUNDING
4709 /* NB: Let the backend known the number of bytes to push and
4710 decide if push insns should be generated. */
4711 unsigned int push_size
;
4712 if (CONST_INT_P (size
))
4713 push_size
= INTVAL (size
);
4717 /* Do it with several push insns if that doesn't take lots of insns
4718 and if there is no difficulty with push insns that skip bytes
4719 on the stack for alignment purposes. */
4721 && targetm
.calls
.push_argument (push_size
)
4722 && CONST_INT_P (size
)
4724 && MEM_ALIGN (xinner
) >= align
4725 && can_move_by_pieces ((unsigned) INTVAL (size
) - used
, align
)
4726 /* Here we avoid the case of a structure whose weak alignment
4727 forces many pushes of a small amount of data,
4728 and such small pushes do rounding that causes trouble. */
4729 && ((!targetm
.slow_unaligned_access (word_mode
, align
))
4730 || align
>= BIGGEST_ALIGNMENT
4731 || known_eq (PUSH_ROUNDING (align
/ BITS_PER_UNIT
),
4732 align
/ BITS_PER_UNIT
))
4733 && known_eq (PUSH_ROUNDING (INTVAL (size
)), INTVAL (size
)))
4735 /* Push padding now if padding above and stack grows down,
4736 or if padding below and stack grows up.
4737 But if space already allocated, this has already been done. */
4738 if (maybe_ne (extra
, 0)
4740 && where_pad
!= PAD_NONE
4741 && where_pad
!= stack_direction
)
4742 anti_adjust_stack (gen_int_mode (extra
, Pmode
));
4744 move_by_pieces (NULL
, xinner
, INTVAL (size
) - used
, align
,
4748 #endif /* PUSH_ROUNDING */
4752 /* Otherwise make space on the stack and copy the data
4753 to the address of that space. */
4755 /* Deduct words put into registers from the size we must copy. */
4758 if (CONST_INT_P (size
))
4759 size
= GEN_INT (INTVAL (size
) - used
);
4761 size
= expand_binop (GET_MODE (size
), sub_optab
, size
,
4762 gen_int_mode (used
, GET_MODE (size
)),
4763 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4766 /* Get the address of the stack space.
4767 In this case, we do not deal with EXTRA separately.
4768 A single stack adjust will do. */
4769 poly_int64 const_args_so_far
;
4772 temp
= push_block (size
, extra
, where_pad
== PAD_DOWNWARD
);
4775 else if (poly_int_rtx_p (args_so_far
, &const_args_so_far
))
4776 temp
= memory_address (BLKmode
,
4777 plus_constant (Pmode
, args_addr
,
4778 skip
+ const_args_so_far
));
4780 temp
= memory_address (BLKmode
,
4781 plus_constant (Pmode
,
4782 gen_rtx_PLUS (Pmode
,
4787 if (!ACCUMULATE_OUTGOING_ARGS
)
4789 /* If the source is referenced relative to the stack pointer,
4790 copy it to another register to stabilize it. We do not need
4791 to do this if we know that we won't be changing sp. */
4793 if (reg_mentioned_p (virtual_stack_dynamic_rtx
, temp
)
4794 || reg_mentioned_p (virtual_outgoing_args_rtx
, temp
))
4795 temp
= copy_to_reg (temp
);
4798 target
= gen_rtx_MEM (BLKmode
, temp
);
4800 /* We do *not* set_mem_attributes here, because incoming arguments
4801 may overlap with sibling call outgoing arguments and we cannot
4802 allow reordering of reads from function arguments with stores
4803 to outgoing arguments of sibling calls. We do, however, want
4804 to record the alignment of the stack slot. */
4805 /* ALIGN may well be better aligned than TYPE, e.g. due to
4806 PARM_BOUNDARY. Assume the caller isn't lying. */
4807 set_mem_align (target
, align
);
4809 /* If part should go in registers and pushing to that part would
4810 overwrite some of the values that need to go into regs, load the
4811 overlapping values into temporary pseudos to be moved into the hard
4812 regs at the end after the stack pushing has completed.
4813 We cannot load them directly into the hard regs here because
4814 they can be clobbered by the block move expansions.
4817 if (partial
> 0 && reg
!= 0 && mode
== BLKmode
4818 && GET_CODE (reg
) != PARALLEL
)
4820 overlapping
= memory_load_overlap (XEXP (x
, 0), temp
, partial
);
4821 if (overlapping
> 0)
4823 gcc_assert (overlapping
% UNITS_PER_WORD
== 0);
4824 overlapping
/= UNITS_PER_WORD
;
4826 tmp_regs
= XALLOCAVEC (rtx
, overlapping
);
4828 for (int i
= 0; i
< overlapping
; i
++)
4829 tmp_regs
[i
] = gen_reg_rtx (word_mode
);
4831 for (int i
= 0; i
< overlapping
; i
++)
4832 emit_move_insn (tmp_regs
[i
],
4833 operand_subword_force (target
, i
, mode
));
4835 else if (overlapping
== -1)
4837 /* Could not determine whether there is overlap.
4838 Fail the sibcall. */
4846 emit_block_move (target
, xinner
, size
, BLOCK_OP_CALL_PARM
);
4849 else if (partial
> 0)
4851 /* Scalar partly in registers. This case is only supported
4852 for fixed-wdth modes. */
4853 int num_words
= GET_MODE_SIZE (mode
).to_constant ();
4854 num_words
/= UNITS_PER_WORD
;
4857 /* # bytes of start of argument
4858 that we must make space for but need not store. */
4859 int offset
= partial
% (PARM_BOUNDARY
/ BITS_PER_UNIT
);
4860 int args_offset
= INTVAL (args_so_far
);
4863 /* Push padding now if padding above and stack grows down,
4864 or if padding below and stack grows up.
4865 But if space already allocated, this has already been done. */
4866 if (maybe_ne (extra
, 0)
4868 && where_pad
!= PAD_NONE
4869 && where_pad
!= stack_direction
)
4870 anti_adjust_stack (gen_int_mode (extra
, Pmode
));
4872 /* If we make space by pushing it, we might as well push
4873 the real data. Otherwise, we can leave OFFSET nonzero
4874 and leave the space uninitialized. */
4878 /* Now NOT_STACK gets the number of words that we don't need to
4879 allocate on the stack. Convert OFFSET to words too. */
4880 not_stack
= (partial
- offset
) / UNITS_PER_WORD
;
4881 offset
/= UNITS_PER_WORD
;
4883 /* If the partial register-part of the arg counts in its stack size,
4884 skip the part of stack space corresponding to the registers.
4885 Otherwise, start copying to the beginning of the stack space,
4886 by setting SKIP to 0. */
4887 skip
= (reg_parm_stack_space
== 0) ? 0 : not_stack
;
4889 if (CONSTANT_P (x
) && !targetm
.legitimate_constant_p (mode
, x
))
4890 x
= validize_mem (force_const_mem (mode
, x
));
4892 /* If X is a hard register in a non-integer mode, copy it into a pseudo;
4893 SUBREGs of such registers are not allowed. */
4894 if ((REG_P (x
) && REGNO (x
) < FIRST_PSEUDO_REGISTER
4895 && GET_MODE_CLASS (GET_MODE (x
)) != MODE_INT
))
4896 x
= copy_to_reg (x
);
4898 /* Loop over all the words allocated on the stack for this arg. */
4899 /* We can do it by words, because any scalar bigger than a word
4900 has a size a multiple of a word. */
4901 for (i
= num_words
- 1; i
>= not_stack
; i
--)
4902 if (i
>= not_stack
+ offset
)
4903 if (!emit_push_insn (operand_subword_force (x
, i
, mode
),
4904 word_mode
, NULL_TREE
, NULL_RTX
, align
, 0, NULL_RTX
,
4906 GEN_INT (args_offset
+ ((i
- not_stack
+ skip
)
4908 reg_parm_stack_space
, alignment_pad
, sibcall_p
))
4916 /* Push padding now if padding above and stack grows down,
4917 or if padding below and stack grows up.
4918 But if space already allocated, this has already been done. */
4919 if (maybe_ne (extra
, 0)
4921 && where_pad
!= PAD_NONE
4922 && where_pad
!= stack_direction
)
4923 anti_adjust_stack (gen_int_mode (extra
, Pmode
));
4925 #ifdef PUSH_ROUNDING
4926 if (args_addr
== 0 && targetm
.calls
.push_argument (0))
4927 emit_single_push_insn (mode
, x
, type
);
4931 addr
= simplify_gen_binary (PLUS
, Pmode
, args_addr
, args_so_far
);
4932 dest
= gen_rtx_MEM (mode
, memory_address (mode
, addr
));
4934 /* We do *not* set_mem_attributes here, because incoming arguments
4935 may overlap with sibling call outgoing arguments and we cannot
4936 allow reordering of reads from function arguments with stores
4937 to outgoing arguments of sibling calls. We do, however, want
4938 to record the alignment of the stack slot. */
4939 /* ALIGN may well be better aligned than TYPE, e.g. due to
4940 PARM_BOUNDARY. Assume the caller isn't lying. */
4941 set_mem_align (dest
, align
);
4943 emit_move_insn (dest
, x
);
4947 /* Move the partial arguments into the registers and any overlapping
4948 values that we moved into the pseudos in tmp_regs. */
4949 if (partial
> 0 && reg
!= 0)
4951 /* Handle calls that pass values in multiple non-contiguous locations.
4952 The Irix 6 ABI has examples of this. */
4953 if (GET_CODE (reg
) == PARALLEL
)
4954 emit_group_load (reg
, x
, type
, -1);
4957 gcc_assert (partial
% UNITS_PER_WORD
== 0);
4958 move_block_to_reg (REGNO (reg
), x
, nregs
- overlapping
, mode
);
4960 for (int i
= 0; i
< overlapping
; i
++)
4961 emit_move_insn (gen_rtx_REG (word_mode
, REGNO (reg
)
4962 + nregs
- overlapping
+ i
),
4968 if (maybe_ne (extra
, 0) && args_addr
== 0 && where_pad
== stack_direction
)
4969 anti_adjust_stack (gen_int_mode (extra
, Pmode
));
4971 if (alignment_pad
&& args_addr
== 0)
4972 anti_adjust_stack (alignment_pad
);
4977 /* Return X if X can be used as a subtarget in a sequence of arithmetic
4981 get_subtarget (rtx x
)
4985 /* Only registers can be subtargets. */
4987 /* Don't use hard regs to avoid extending their life. */
4988 || REGNO (x
) < FIRST_PSEUDO_REGISTER
4992 /* A subroutine of expand_assignment. Optimize FIELD op= VAL, where
4993 FIELD is a bitfield. Returns true if the optimization was successful,
4994 and there's nothing else to do. */
4997 optimize_bitfield_assignment_op (poly_uint64 pbitsize
,
4998 poly_uint64 pbitpos
,
4999 poly_uint64 pbitregion_start
,
5000 poly_uint64 pbitregion_end
,
5001 machine_mode mode1
, rtx str_rtx
,
5002 tree to
, tree src
, bool reverse
)
5004 /* str_mode is not guaranteed to be a scalar type. */
5005 machine_mode str_mode
= GET_MODE (str_rtx
);
5006 unsigned int str_bitsize
;
5011 enum tree_code code
;
5013 unsigned HOST_WIDE_INT bitsize
, bitpos
, bitregion_start
, bitregion_end
;
5014 if (mode1
!= VOIDmode
5015 || !pbitsize
.is_constant (&bitsize
)
5016 || !pbitpos
.is_constant (&bitpos
)
5017 || !pbitregion_start
.is_constant (&bitregion_start
)
5018 || !pbitregion_end
.is_constant (&bitregion_end
)
5019 || bitsize
>= BITS_PER_WORD
5020 || !GET_MODE_BITSIZE (str_mode
).is_constant (&str_bitsize
)
5021 || str_bitsize
> BITS_PER_WORD
5022 || TREE_SIDE_EFFECTS (to
)
5023 || TREE_THIS_VOLATILE (to
))
5027 if (TREE_CODE (src
) != SSA_NAME
)
5029 if (TREE_CODE (TREE_TYPE (src
)) != INTEGER_TYPE
)
5032 srcstmt
= get_gimple_for_ssa_name (src
);
5034 || TREE_CODE_CLASS (gimple_assign_rhs_code (srcstmt
)) != tcc_binary
)
5037 code
= gimple_assign_rhs_code (srcstmt
);
5039 op0
= gimple_assign_rhs1 (srcstmt
);
5041 /* If OP0 is an SSA_NAME, then we want to walk the use-def chain
5042 to find its initialization. Hopefully the initialization will
5043 be from a bitfield load. */
5044 if (TREE_CODE (op0
) == SSA_NAME
)
5046 gimple
*op0stmt
= get_gimple_for_ssa_name (op0
);
5048 /* We want to eventually have OP0 be the same as TO, which
5049 should be a bitfield. */
5051 || !is_gimple_assign (op0stmt
)
5052 || gimple_assign_rhs_code (op0stmt
) != TREE_CODE (to
))
5054 op0
= gimple_assign_rhs1 (op0stmt
);
5057 op1
= gimple_assign_rhs2 (srcstmt
);
5059 if (!operand_equal_p (to
, op0
, 0))
5062 if (MEM_P (str_rtx
))
5064 unsigned HOST_WIDE_INT offset1
;
5066 if (str_bitsize
== 0 || str_bitsize
> BITS_PER_WORD
)
5067 str_bitsize
= BITS_PER_WORD
;
5069 scalar_int_mode best_mode
;
5070 if (!get_best_mode (bitsize
, bitpos
, bitregion_start
, bitregion_end
,
5071 MEM_ALIGN (str_rtx
), str_bitsize
, false, &best_mode
))
5073 str_mode
= best_mode
;
5074 str_bitsize
= GET_MODE_BITSIZE (best_mode
);
5077 bitpos
%= str_bitsize
;
5078 offset1
= (offset1
- bitpos
) / BITS_PER_UNIT
;
5079 str_rtx
= adjust_address (str_rtx
, str_mode
, offset1
);
5081 else if (!REG_P (str_rtx
) && GET_CODE (str_rtx
) != SUBREG
)
5084 /* If the bit field covers the whole REG/MEM, store_field
5085 will likely generate better code. */
5086 if (bitsize
>= str_bitsize
)
5089 /* We can't handle fields split across multiple entities. */
5090 if (bitpos
+ bitsize
> str_bitsize
)
5093 if (reverse
? !BYTES_BIG_ENDIAN
: BYTES_BIG_ENDIAN
)
5094 bitpos
= str_bitsize
- bitpos
- bitsize
;
5100 /* For now, just optimize the case of the topmost bitfield
5101 where we don't need to do any masking and also
5102 1 bit bitfields where xor can be used.
5103 We might win by one instruction for the other bitfields
5104 too if insv/extv instructions aren't used, so that
5105 can be added later. */
5106 if ((reverse
|| bitpos
+ bitsize
!= str_bitsize
)
5107 && (bitsize
!= 1 || TREE_CODE (op1
) != INTEGER_CST
))
5110 value
= expand_expr (op1
, NULL_RTX
, str_mode
, EXPAND_NORMAL
);
5111 value
= convert_modes (str_mode
,
5112 TYPE_MODE (TREE_TYPE (op1
)), value
,
5113 TYPE_UNSIGNED (TREE_TYPE (op1
)));
5115 /* We may be accessing data outside the field, which means
5116 we can alias adjacent data. */
5117 if (MEM_P (str_rtx
))
5119 str_rtx
= shallow_copy_rtx (str_rtx
);
5120 set_mem_alias_set (str_rtx
, 0);
5121 set_mem_expr (str_rtx
, 0);
5124 if (bitsize
== 1 && (reverse
|| bitpos
+ bitsize
!= str_bitsize
))
5126 value
= expand_and (str_mode
, value
, const1_rtx
, NULL
);
5130 binop
= code
== PLUS_EXPR
? add_optab
: sub_optab
;
5132 value
= expand_shift (LSHIFT_EXPR
, str_mode
, value
, bitpos
, NULL_RTX
, 1);
5134 value
= flip_storage_order (str_mode
, value
);
5135 result
= expand_binop (str_mode
, binop
, str_rtx
,
5136 value
, str_rtx
, 1, OPTAB_WIDEN
);
5137 if (result
!= str_rtx
)
5138 emit_move_insn (str_rtx
, result
);
5143 if (TREE_CODE (op1
) != INTEGER_CST
)
5145 value
= expand_expr (op1
, NULL_RTX
, str_mode
, EXPAND_NORMAL
);
5146 value
= convert_modes (str_mode
,
5147 TYPE_MODE (TREE_TYPE (op1
)), value
,
5148 TYPE_UNSIGNED (TREE_TYPE (op1
)));
5150 /* We may be accessing data outside the field, which means
5151 we can alias adjacent data. */
5152 if (MEM_P (str_rtx
))
5154 str_rtx
= shallow_copy_rtx (str_rtx
);
5155 set_mem_alias_set (str_rtx
, 0);
5156 set_mem_expr (str_rtx
, 0);
5159 binop
= code
== BIT_IOR_EXPR
? ior_optab
: xor_optab
;
5160 if (bitpos
+ bitsize
!= str_bitsize
)
5162 rtx mask
= gen_int_mode ((HOST_WIDE_INT_1U
<< bitsize
) - 1,
5164 value
= expand_and (str_mode
, value
, mask
, NULL_RTX
);
5166 value
= expand_shift (LSHIFT_EXPR
, str_mode
, value
, bitpos
, NULL_RTX
, 1);
5168 value
= flip_storage_order (str_mode
, value
);
5169 result
= expand_binop (str_mode
, binop
, str_rtx
,
5170 value
, str_rtx
, 1, OPTAB_WIDEN
);
5171 if (result
!= str_rtx
)
5172 emit_move_insn (str_rtx
, result
);
5182 /* In the C++ memory model, consecutive bit fields in a structure are
5183 considered one memory location.
5185 Given a COMPONENT_REF EXP at position (BITPOS, OFFSET), this function
5186 returns the bit range of consecutive bits in which this COMPONENT_REF
5187 belongs. The values are returned in *BITSTART and *BITEND. *BITPOS
5188 and *OFFSET may be adjusted in the process.
5190 If the access does not need to be restricted, 0 is returned in both
5191 *BITSTART and *BITEND. */
5194 get_bit_range (poly_uint64_pod
*bitstart
, poly_uint64_pod
*bitend
, tree exp
,
5195 poly_int64_pod
*bitpos
, tree
*offset
)
5197 poly_int64 bitoffset
;
5200 gcc_assert (TREE_CODE (exp
) == COMPONENT_REF
);
5202 field
= TREE_OPERAND (exp
, 1);
5203 repr
= DECL_BIT_FIELD_REPRESENTATIVE (field
);
5204 /* If we do not have a DECL_BIT_FIELD_REPRESENTATIVE there is no
5205 need to limit the range we can access. */
5208 *bitstart
= *bitend
= 0;
5212 /* If we have a DECL_BIT_FIELD_REPRESENTATIVE but the enclosing record is
5213 part of a larger bit field, then the representative does not serve any
5214 useful purpose. This can occur in Ada. */
5215 if (handled_component_p (TREE_OPERAND (exp
, 0)))
5218 poly_int64 rbitsize
, rbitpos
;
5220 int unsignedp
, reversep
, volatilep
= 0;
5221 get_inner_reference (TREE_OPERAND (exp
, 0), &rbitsize
, &rbitpos
,
5222 &roffset
, &rmode
, &unsignedp
, &reversep
,
5224 if (!multiple_p (rbitpos
, BITS_PER_UNIT
))
5226 *bitstart
= *bitend
= 0;
5231 /* Compute the adjustment to bitpos from the offset of the field
5232 relative to the representative. DECL_FIELD_OFFSET of field and
5233 repr are the same by construction if they are not constants,
5234 see finish_bitfield_layout. */
5235 poly_uint64 field_offset
, repr_offset
;
5236 if (poly_int_tree_p (DECL_FIELD_OFFSET (field
), &field_offset
)
5237 && poly_int_tree_p (DECL_FIELD_OFFSET (repr
), &repr_offset
))
5238 bitoffset
= (field_offset
- repr_offset
) * BITS_PER_UNIT
;
5241 bitoffset
+= (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field
))
5242 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr
)));
5244 /* If the adjustment is larger than bitpos, we would have a negative bit
5245 position for the lower bound and this may wreak havoc later. Adjust
5246 offset and bitpos to make the lower bound non-negative in that case. */
5247 if (maybe_gt (bitoffset
, *bitpos
))
5249 poly_int64 adjust_bits
= upper_bound (bitoffset
, *bitpos
) - *bitpos
;
5250 poly_int64 adjust_bytes
= exact_div (adjust_bits
, BITS_PER_UNIT
);
5252 *bitpos
+= adjust_bits
;
5253 if (*offset
== NULL_TREE
)
5254 *offset
= size_int (-adjust_bytes
);
5256 *offset
= size_binop (MINUS_EXPR
, *offset
, size_int (adjust_bytes
));
5260 *bitstart
= *bitpos
- bitoffset
;
5262 *bitend
= *bitstart
+ tree_to_poly_uint64 (DECL_SIZE (repr
)) - 1;
5265 /* Returns true if BASE is a DECL that does not reside in memory and
5266 has non-BLKmode. DECL_RTL must not be a MEM; if
5267 DECL_RTL was not set yet, return false. */
5270 non_mem_decl_p (tree base
)
5273 || TREE_ADDRESSABLE (base
)
5274 || DECL_MODE (base
) == BLKmode
)
5277 if (!DECL_RTL_SET_P (base
))
5280 return (!MEM_P (DECL_RTL (base
)));
5283 /* Returns true if REF refers to an object that does not
5284 reside in memory and has non-BLKmode. */
5287 mem_ref_refers_to_non_mem_p (tree ref
)
5291 if (TREE_CODE (ref
) == MEM_REF
5292 || TREE_CODE (ref
) == TARGET_MEM_REF
)
5294 tree addr
= TREE_OPERAND (ref
, 0);
5296 if (TREE_CODE (addr
) != ADDR_EXPR
)
5299 base
= TREE_OPERAND (addr
, 0);
5304 return non_mem_decl_p (base
);
5307 /* Expand an assignment that stores the value of FROM into TO. If NONTEMPORAL
5308 is true, try generating a nontemporal store. */
5311 expand_assignment (tree to
, tree from
, bool nontemporal
)
5317 enum insn_code icode
;
5319 /* Don't crash if the lhs of the assignment was erroneous. */
5320 if (TREE_CODE (to
) == ERROR_MARK
)
5322 expand_normal (from
);
5326 /* Optimize away no-op moves without side-effects. */
5327 if (operand_equal_p (to
, from
, 0))
5330 /* Handle misaligned stores. */
5331 mode
= TYPE_MODE (TREE_TYPE (to
));
5332 if ((TREE_CODE (to
) == MEM_REF
5333 || TREE_CODE (to
) == TARGET_MEM_REF
5336 && !mem_ref_refers_to_non_mem_p (to
)
5337 && ((align
= get_object_alignment (to
))
5338 < GET_MODE_ALIGNMENT (mode
))
5339 && (((icode
= optab_handler (movmisalign_optab
, mode
))
5340 != CODE_FOR_nothing
)
5341 || targetm
.slow_unaligned_access (mode
, align
)))
5345 reg
= expand_expr (from
, NULL_RTX
, VOIDmode
, EXPAND_NORMAL
);
5346 /* Handle PARALLEL. */
5347 reg
= maybe_emit_group_store (reg
, TREE_TYPE (from
));
5348 reg
= force_not_mem (reg
);
5349 mem
= expand_expr (to
, NULL_RTX
, VOIDmode
, EXPAND_WRITE
);
5350 if (TREE_CODE (to
) == MEM_REF
&& REF_REVERSE_STORAGE_ORDER (to
))
5351 reg
= flip_storage_order (mode
, reg
);
5353 if (icode
!= CODE_FOR_nothing
)
5355 class expand_operand ops
[2];
5357 create_fixed_operand (&ops
[0], mem
);
5358 create_input_operand (&ops
[1], reg
, mode
);
5359 /* The movmisalign<mode> pattern cannot fail, else the assignment
5360 would silently be omitted. */
5361 expand_insn (icode
, 2, ops
);
5364 store_bit_field (mem
, GET_MODE_BITSIZE (mode
), 0, 0, 0, mode
, reg
,
5369 /* Assignment of a structure component needs special treatment
5370 if the structure component's rtx is not simply a MEM.
5371 Assignment of an array element at a constant index, and assignment of
5372 an array element in an unaligned packed structure field, has the same
5373 problem. Same for (partially) storing into a non-memory object. */
5374 if (handled_component_p (to
)
5375 || (TREE_CODE (to
) == MEM_REF
5376 && (REF_REVERSE_STORAGE_ORDER (to
)
5377 || mem_ref_refers_to_non_mem_p (to
)))
5378 || TREE_CODE (TREE_TYPE (to
)) == ARRAY_TYPE
)
5381 poly_int64 bitsize
, bitpos
;
5382 poly_uint64 bitregion_start
= 0;
5383 poly_uint64 bitregion_end
= 0;
5385 int unsignedp
, reversep
, volatilep
= 0;
5389 tem
= get_inner_reference (to
, &bitsize
, &bitpos
, &offset
, &mode1
,
5390 &unsignedp
, &reversep
, &volatilep
);
5392 /* Make sure bitpos is not negative, it can wreak havoc later. */
5393 if (maybe_lt (bitpos
, 0))
5395 gcc_assert (offset
== NULL_TREE
);
5396 offset
= size_int (bits_to_bytes_round_down (bitpos
));
5397 bitpos
= num_trailing_bits (bitpos
);
5400 if (TREE_CODE (to
) == COMPONENT_REF
5401 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (to
, 1)))
5402 get_bit_range (&bitregion_start
, &bitregion_end
, to
, &bitpos
, &offset
);
5403 /* The C++ memory model naturally applies to byte-aligned fields.
5404 However, if we do not have a DECL_BIT_FIELD_TYPE but BITPOS or
5405 BITSIZE are not byte-aligned, there is no need to limit the range
5406 we can access. This can occur with packed structures in Ada. */
5407 else if (maybe_gt (bitsize
, 0)
5408 && multiple_p (bitsize
, BITS_PER_UNIT
)
5409 && multiple_p (bitpos
, BITS_PER_UNIT
))
5411 bitregion_start
= bitpos
;
5412 bitregion_end
= bitpos
+ bitsize
- 1;
5415 to_rtx
= expand_expr (tem
, NULL_RTX
, VOIDmode
, EXPAND_WRITE
);
5417 /* If the field has a mode, we want to access it in the
5418 field's mode, not the computed mode.
5419 If a MEM has VOIDmode (external with incomplete type),
5420 use BLKmode for it instead. */
5423 if (mode1
!= VOIDmode
)
5424 to_rtx
= adjust_address (to_rtx
, mode1
, 0);
5425 else if (GET_MODE (to_rtx
) == VOIDmode
)
5426 to_rtx
= adjust_address (to_rtx
, BLKmode
, 0);
5431 machine_mode address_mode
;
5434 if (!MEM_P (to_rtx
))
5436 /* We can get constant negative offsets into arrays with broken
5437 user code. Translate this to a trap instead of ICEing. */
5438 gcc_assert (TREE_CODE (offset
) == INTEGER_CST
);
5439 expand_builtin_trap ();
5440 to_rtx
= gen_rtx_MEM (BLKmode
, const0_rtx
);
5443 offset_rtx
= expand_expr (offset
, NULL_RTX
, VOIDmode
, EXPAND_SUM
);
5444 address_mode
= get_address_mode (to_rtx
);
5445 if (GET_MODE (offset_rtx
) != address_mode
)
5447 /* We cannot be sure that the RTL in offset_rtx is valid outside
5448 of a memory address context, so force it into a register
5449 before attempting to convert it to the desired mode. */
5450 offset_rtx
= force_operand (offset_rtx
, NULL_RTX
);
5451 offset_rtx
= convert_to_mode (address_mode
, offset_rtx
, 0);
5454 /* If we have an expression in OFFSET_RTX and a non-zero
5455 byte offset in BITPOS, adding the byte offset before the
5456 OFFSET_RTX results in better intermediate code, which makes
5457 later rtl optimization passes perform better.
5459 We prefer intermediate code like this:
5461 r124:DI=r123:DI+0x18
5466 r124:DI=r123:DI+0x10
5467 [r124:DI+0x8]=r121:DI
5469 This is only done for aligned data values, as these can
5470 be expected to result in single move instructions. */
5472 if (mode1
!= VOIDmode
5473 && maybe_ne (bitpos
, 0)
5474 && maybe_gt (bitsize
, 0)
5475 && multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
5476 && multiple_p (bitpos
, bitsize
)
5477 && multiple_p (bitsize
, GET_MODE_ALIGNMENT (mode1
))
5478 && MEM_ALIGN (to_rtx
) >= GET_MODE_ALIGNMENT (mode1
))
5480 to_rtx
= adjust_address (to_rtx
, mode1
, bytepos
);
5481 bitregion_start
= 0;
5482 if (known_ge (bitregion_end
, poly_uint64 (bitpos
)))
5483 bitregion_end
-= bitpos
;
5487 to_rtx
= offset_address (to_rtx
, offset_rtx
,
5488 highest_pow2_factor_for_target (to
,
5492 /* No action is needed if the target is not a memory and the field
5493 lies completely outside that target. This can occur if the source
5494 code contains an out-of-bounds access to a small array. */
5496 && GET_MODE (to_rtx
) != BLKmode
5497 && known_ge (bitpos
, GET_MODE_PRECISION (GET_MODE (to_rtx
))))
5499 expand_normal (from
);
5502 /* Handle expand_expr of a complex value returning a CONCAT. */
5503 else if (GET_CODE (to_rtx
) == CONCAT
)
5505 machine_mode to_mode
= GET_MODE (to_rtx
);
5506 gcc_checking_assert (COMPLEX_MODE_P (to_mode
));
5507 poly_int64 mode_bitsize
= GET_MODE_BITSIZE (to_mode
);
5508 unsigned short inner_bitsize
= GET_MODE_UNIT_BITSIZE (to_mode
);
5509 if (TYPE_MODE (TREE_TYPE (from
)) == to_mode
5510 && known_eq (bitpos
, 0)
5511 && known_eq (bitsize
, mode_bitsize
))
5512 result
= store_expr (from
, to_rtx
, false, nontemporal
, reversep
);
5513 else if (TYPE_MODE (TREE_TYPE (from
)) == GET_MODE_INNER (to_mode
)
5514 && known_eq (bitsize
, inner_bitsize
)
5515 && (known_eq (bitpos
, 0)
5516 || known_eq (bitpos
, inner_bitsize
)))
5517 result
= store_expr (from
, XEXP (to_rtx
, maybe_ne (bitpos
, 0)),
5518 false, nontemporal
, reversep
);
5519 else if (known_le (bitpos
+ bitsize
, inner_bitsize
))
5520 result
= store_field (XEXP (to_rtx
, 0), bitsize
, bitpos
,
5521 bitregion_start
, bitregion_end
,
5522 mode1
, from
, get_alias_set (to
),
5523 nontemporal
, reversep
);
5524 else if (known_ge (bitpos
, inner_bitsize
))
5525 result
= store_field (XEXP (to_rtx
, 1), bitsize
,
5526 bitpos
- inner_bitsize
,
5527 bitregion_start
, bitregion_end
,
5528 mode1
, from
, get_alias_set (to
),
5529 nontemporal
, reversep
);
5530 else if (known_eq (bitpos
, 0) && known_eq (bitsize
, mode_bitsize
))
5532 result
= expand_normal (from
);
5533 if (GET_CODE (result
) == CONCAT
)
5535 to_mode
= GET_MODE_INNER (to_mode
);
5536 machine_mode from_mode
= GET_MODE_INNER (GET_MODE (result
));
5538 = simplify_gen_subreg (to_mode
, XEXP (result
, 0),
5541 = simplify_gen_subreg (to_mode
, XEXP (result
, 1),
5543 if (!from_real
|| !from_imag
)
5544 goto concat_store_slow
;
5545 emit_move_insn (XEXP (to_rtx
, 0), from_real
);
5546 emit_move_insn (XEXP (to_rtx
, 1), from_imag
);
5550 machine_mode from_mode
5551 = GET_MODE (result
) == VOIDmode
5552 ? TYPE_MODE (TREE_TYPE (from
))
5553 : GET_MODE (result
);
5556 from_rtx
= change_address (result
, to_mode
, NULL_RTX
);
5559 = simplify_gen_subreg (to_mode
, result
, from_mode
, 0);
5562 emit_move_insn (XEXP (to_rtx
, 0),
5563 read_complex_part (from_rtx
, false));
5564 emit_move_insn (XEXP (to_rtx
, 1),
5565 read_complex_part (from_rtx
, true));
5569 to_mode
= GET_MODE_INNER (to_mode
);
5571 = simplify_gen_subreg (to_mode
, result
, from_mode
, 0);
5573 = simplify_gen_subreg (to_mode
, result
, from_mode
,
5574 GET_MODE_SIZE (to_mode
));
5575 if (!from_real
|| !from_imag
)
5576 goto concat_store_slow
;
5577 emit_move_insn (XEXP (to_rtx
, 0), from_real
);
5578 emit_move_insn (XEXP (to_rtx
, 1), from_imag
);
5585 rtx temp
= assign_stack_temp (GET_MODE (to_rtx
),
5586 GET_MODE_SIZE (GET_MODE (to_rtx
)));
5587 write_complex_part (temp
, XEXP (to_rtx
, 0), false);
5588 write_complex_part (temp
, XEXP (to_rtx
, 1), true);
5589 result
= store_field (temp
, bitsize
, bitpos
,
5590 bitregion_start
, bitregion_end
,
5591 mode1
, from
, get_alias_set (to
),
5592 nontemporal
, reversep
);
5593 emit_move_insn (XEXP (to_rtx
, 0), read_complex_part (temp
, false));
5594 emit_move_insn (XEXP (to_rtx
, 1), read_complex_part (temp
, true));
5597 /* For calls to functions returning variable length structures, if TO_RTX
5598 is not a MEM, go through a MEM because we must not create temporaries
5600 else if (!MEM_P (to_rtx
)
5601 && TREE_CODE (from
) == CALL_EXPR
5602 && COMPLETE_TYPE_P (TREE_TYPE (from
))
5603 && TREE_CODE (TYPE_SIZE (TREE_TYPE (from
))) != INTEGER_CST
)
5605 rtx temp
= assign_stack_temp (GET_MODE (to_rtx
),
5606 GET_MODE_SIZE (GET_MODE (to_rtx
)));
5607 result
= store_field (temp
, bitsize
, bitpos
, bitregion_start
,
5608 bitregion_end
, mode1
, from
, get_alias_set (to
),
5609 nontemporal
, reversep
);
5610 emit_move_insn (to_rtx
, temp
);
5616 /* If the field is at offset zero, we could have been given the
5617 DECL_RTX of the parent struct. Don't munge it. */
5618 to_rtx
= shallow_copy_rtx (to_rtx
);
5619 set_mem_attributes_minus_bitpos (to_rtx
, to
, 0, bitpos
);
5621 MEM_VOLATILE_P (to_rtx
) = 1;
5624 gcc_checking_assert (known_ge (bitpos
, 0));
5625 if (optimize_bitfield_assignment_op (bitsize
, bitpos
,
5626 bitregion_start
, bitregion_end
,
5627 mode1
, to_rtx
, to
, from
,
5630 else if (SUBREG_P (to_rtx
)
5631 && SUBREG_PROMOTED_VAR_P (to_rtx
))
5633 /* If to_rtx is a promoted subreg, we need to zero or sign
5634 extend the value afterwards. */
5635 if (TREE_CODE (to
) == MEM_REF
5636 && TYPE_MODE (TREE_TYPE (from
)) != BLKmode
5637 && !REF_REVERSE_STORAGE_ORDER (to
)
5638 && known_eq (bitpos
, 0)
5639 && known_eq (bitsize
, GET_MODE_BITSIZE (GET_MODE (to_rtx
))))
5640 result
= store_expr (from
, to_rtx
, 0, nontemporal
, false);
5644 = lowpart_subreg (subreg_unpromoted_mode (to_rtx
),
5645 SUBREG_REG (to_rtx
),
5646 subreg_promoted_mode (to_rtx
));
5647 result
= store_field (to_rtx1
, bitsize
, bitpos
,
5648 bitregion_start
, bitregion_end
,
5649 mode1
, from
, get_alias_set (to
),
5650 nontemporal
, reversep
);
5651 convert_move (SUBREG_REG (to_rtx
), to_rtx1
,
5652 SUBREG_PROMOTED_SIGN (to_rtx
));
5656 result
= store_field (to_rtx
, bitsize
, bitpos
,
5657 bitregion_start
, bitregion_end
,
5658 mode1
, from
, get_alias_set (to
),
5659 nontemporal
, reversep
);
5663 preserve_temp_slots (result
);
5668 /* If the rhs is a function call and its value is not an aggregate,
5669 call the function before we start to compute the lhs.
5670 This is needed for correct code for cases such as
5671 val = setjmp (buf) on machines where reference to val
5672 requires loading up part of an address in a separate insn.
5674 Don't do this if TO is a VAR_DECL or PARM_DECL whose DECL_RTL is REG
5675 since it might be a promoted variable where the zero- or sign- extension
5676 needs to be done. Handling this in the normal way is safe because no
5677 computation is done before the call. The same is true for SSA names. */
5678 if (TREE_CODE (from
) == CALL_EXPR
&& ! aggregate_value_p (from
, from
)
5679 && COMPLETE_TYPE_P (TREE_TYPE (from
))
5680 && TREE_CODE (TYPE_SIZE (TREE_TYPE (from
))) == INTEGER_CST
5682 || TREE_CODE (to
) == PARM_DECL
5683 || TREE_CODE (to
) == RESULT_DECL
)
5684 && REG_P (DECL_RTL (to
)))
5685 || TREE_CODE (to
) == SSA_NAME
))
5690 value
= expand_normal (from
);
5693 to_rtx
= expand_expr (to
, NULL_RTX
, VOIDmode
, EXPAND_WRITE
);
5695 /* Handle calls that return values in multiple non-contiguous locations.
5696 The Irix 6 ABI has examples of this. */
5697 if (GET_CODE (to_rtx
) == PARALLEL
)
5699 if (GET_CODE (value
) == PARALLEL
)
5700 emit_group_move (to_rtx
, value
);
5702 emit_group_load (to_rtx
, value
, TREE_TYPE (from
),
5703 int_size_in_bytes (TREE_TYPE (from
)));
5705 else if (GET_CODE (value
) == PARALLEL
)
5706 emit_group_store (to_rtx
, value
, TREE_TYPE (from
),
5707 int_size_in_bytes (TREE_TYPE (from
)));
5708 else if (GET_MODE (to_rtx
) == BLKmode
)
5710 /* Handle calls that return BLKmode values in registers. */
5712 copy_blkmode_from_reg (to_rtx
, value
, TREE_TYPE (from
));
5714 emit_block_move (to_rtx
, value
, expr_size (from
), BLOCK_OP_NORMAL
);
5718 if (POINTER_TYPE_P (TREE_TYPE (to
)))
5719 value
= convert_memory_address_addr_space
5720 (as_a
<scalar_int_mode
> (GET_MODE (to_rtx
)), value
,
5721 TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (to
))));
5723 emit_move_insn (to_rtx
, value
);
5726 preserve_temp_slots (to_rtx
);
5731 /* Ordinary treatment. Expand TO to get a REG or MEM rtx. */
5732 to_rtx
= expand_expr (to
, NULL_RTX
, VOIDmode
, EXPAND_WRITE
);
5734 /* Don't move directly into a return register. */
5735 if (TREE_CODE (to
) == RESULT_DECL
5736 && (REG_P (to_rtx
) || GET_CODE (to_rtx
) == PARALLEL
))
5742 /* If the source is itself a return value, it still is in a pseudo at
5743 this point so we can move it back to the return register directly. */
5745 && TYPE_MODE (TREE_TYPE (from
)) == BLKmode
5746 && TREE_CODE (from
) != CALL_EXPR
)
5747 temp
= copy_blkmode_to_reg (GET_MODE (to_rtx
), from
);
5749 temp
= expand_expr (from
, NULL_RTX
, GET_MODE (to_rtx
), EXPAND_NORMAL
);
5751 /* Handle calls that return values in multiple non-contiguous locations.
5752 The Irix 6 ABI has examples of this. */
5753 if (GET_CODE (to_rtx
) == PARALLEL
)
5755 if (GET_CODE (temp
) == PARALLEL
)
5756 emit_group_move (to_rtx
, temp
);
5758 emit_group_load (to_rtx
, temp
, TREE_TYPE (from
),
5759 int_size_in_bytes (TREE_TYPE (from
)));
5762 emit_move_insn (to_rtx
, temp
);
5764 preserve_temp_slots (to_rtx
);
5769 /* In case we are returning the contents of an object which overlaps
5770 the place the value is being stored, use a safe function when copying
5771 a value through a pointer into a structure value return block. */
5772 if (TREE_CODE (to
) == RESULT_DECL
5773 && TREE_CODE (from
) == INDIRECT_REF
5774 && ADDR_SPACE_GENERIC_P
5775 (TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (from
, 0)))))
5776 && refs_may_alias_p (to
, from
)
5777 && cfun
->returns_struct
5778 && !cfun
->returns_pcc_struct
)
5783 size
= expr_size (from
);
5784 from_rtx
= expand_normal (from
);
5786 emit_block_move_via_libcall (XEXP (to_rtx
, 0), XEXP (from_rtx
, 0), size
);
5788 preserve_temp_slots (to_rtx
);
5793 /* Compute FROM and store the value in the rtx we got. */
5796 result
= store_expr (from
, to_rtx
, 0, nontemporal
, false);
5797 preserve_temp_slots (result
);
5802 /* Emits nontemporal store insn that moves FROM to TO. Returns true if this
5803 succeeded, false otherwise. */
5806 emit_storent_insn (rtx to
, rtx from
)
5808 class expand_operand ops
[2];
5809 machine_mode mode
= GET_MODE (to
);
5810 enum insn_code code
= optab_handler (storent_optab
, mode
);
5812 if (code
== CODE_FOR_nothing
)
5815 create_fixed_operand (&ops
[0], to
);
5816 create_input_operand (&ops
[1], from
, mode
);
5817 return maybe_expand_insn (code
, 2, ops
);
5820 /* Helper function for store_expr storing of STRING_CST. */
5823 string_cst_read_str (void *data
, void *, HOST_WIDE_INT offset
,
5824 fixed_size_mode mode
)
5826 tree str
= (tree
) data
;
5828 gcc_assert (offset
>= 0);
5829 if (offset
>= TREE_STRING_LENGTH (str
))
5832 if ((unsigned HOST_WIDE_INT
) offset
+ GET_MODE_SIZE (mode
)
5833 > (unsigned HOST_WIDE_INT
) TREE_STRING_LENGTH (str
))
5835 char *p
= XALLOCAVEC (char, GET_MODE_SIZE (mode
));
5836 size_t l
= TREE_STRING_LENGTH (str
) - offset
;
5837 memcpy (p
, TREE_STRING_POINTER (str
) + offset
, l
);
5838 memset (p
+ l
, '\0', GET_MODE_SIZE (mode
) - l
);
5839 return c_readstr (p
, as_a
<scalar_int_mode
> (mode
), false);
5842 /* The by-pieces infrastructure does not try to pick a vector mode
5843 for storing STRING_CST. */
5844 return c_readstr (TREE_STRING_POINTER (str
) + offset
,
5845 as_a
<scalar_int_mode
> (mode
), false);
5848 /* Generate code for computing expression EXP,
5849 and storing the value into TARGET.
5851 If the mode is BLKmode then we may return TARGET itself.
5852 It turns out that in BLKmode it doesn't cause a problem.
5853 because C has no operators that could combine two different
5854 assignments into the same BLKmode object with different values
5855 with no sequence point. Will other languages need this to
5858 If CALL_PARAM_P is nonzero, this is a store into a call param on the
5859 stack, and block moves may need to be treated specially.
5861 If NONTEMPORAL is true, try using a nontemporal store instruction.
5863 If REVERSE is true, the store is to be done in reverse order. */
5866 store_expr (tree exp
, rtx target
, int call_param_p
,
5867 bool nontemporal
, bool reverse
)
5870 rtx alt_rtl
= NULL_RTX
;
5871 location_t loc
= curr_insn_location ();
5872 bool shortened_string_cst
= false;
5874 if (VOID_TYPE_P (TREE_TYPE (exp
)))
5876 /* C++ can generate ?: expressions with a throw expression in one
5877 branch and an rvalue in the other. Here, we resolve attempts to
5878 store the throw expression's nonexistent result. */
5879 gcc_assert (!call_param_p
);
5880 expand_expr (exp
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5883 if (TREE_CODE (exp
) == COMPOUND_EXPR
)
5885 /* Perform first part of compound expression, then assign from second
5887 expand_expr (TREE_OPERAND (exp
, 0), const0_rtx
, VOIDmode
,
5888 call_param_p
? EXPAND_STACK_PARM
: EXPAND_NORMAL
);
5889 return store_expr (TREE_OPERAND (exp
, 1), target
,
5890 call_param_p
, nontemporal
, reverse
);
5892 else if (TREE_CODE (exp
) == COND_EXPR
&& GET_MODE (target
) == BLKmode
)
5894 /* For conditional expression, get safe form of the target. Then
5895 test the condition, doing the appropriate assignment on either
5896 side. This avoids the creation of unnecessary temporaries.
5897 For non-BLKmode, it is more efficient not to do this. */
5899 rtx_code_label
*lab1
= gen_label_rtx (), *lab2
= gen_label_rtx ();
5901 do_pending_stack_adjust ();
5903 jumpifnot (TREE_OPERAND (exp
, 0), lab1
,
5904 profile_probability::uninitialized ());
5905 store_expr (TREE_OPERAND (exp
, 1), target
, call_param_p
,
5906 nontemporal
, reverse
);
5907 emit_jump_insn (targetm
.gen_jump (lab2
));
5910 store_expr (TREE_OPERAND (exp
, 2), target
, call_param_p
,
5911 nontemporal
, reverse
);
5917 else if (GET_CODE (target
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (target
))
5918 /* If this is a scalar in a register that is stored in a wider mode
5919 than the declared mode, compute the result into its declared mode
5920 and then convert to the wider mode. Our value is the computed
5923 rtx inner_target
= 0;
5924 scalar_int_mode outer_mode
= subreg_unpromoted_mode (target
);
5925 scalar_int_mode inner_mode
= subreg_promoted_mode (target
);
5927 /* We can do the conversion inside EXP, which will often result
5928 in some optimizations. Do the conversion in two steps: first
5929 change the signedness, if needed, then the extend. But don't
5930 do this if the type of EXP is a subtype of something else
5931 since then the conversion might involve more than just
5932 converting modes. */
5933 if (INTEGRAL_TYPE_P (TREE_TYPE (exp
))
5934 && TREE_TYPE (TREE_TYPE (exp
)) == 0
5935 && GET_MODE_PRECISION (outer_mode
)
5936 == TYPE_PRECISION (TREE_TYPE (exp
)))
5938 if (!SUBREG_CHECK_PROMOTED_SIGN (target
,
5939 TYPE_UNSIGNED (TREE_TYPE (exp
))))
5941 /* Some types, e.g. Fortran's logical*4, won't have a signed
5942 version, so use the mode instead. */
5944 = (signed_or_unsigned_type_for
5945 (SUBREG_PROMOTED_SIGN (target
), TREE_TYPE (exp
)));
5947 ntype
= lang_hooks
.types
.type_for_mode
5948 (TYPE_MODE (TREE_TYPE (exp
)),
5949 SUBREG_PROMOTED_SIGN (target
));
5951 exp
= fold_convert_loc (loc
, ntype
, exp
);
5954 exp
= fold_convert_loc (loc
, lang_hooks
.types
.type_for_mode
5955 (inner_mode
, SUBREG_PROMOTED_SIGN (target
)),
5958 inner_target
= SUBREG_REG (target
);
5961 temp
= expand_expr (exp
, inner_target
, VOIDmode
,
5962 call_param_p
? EXPAND_STACK_PARM
: EXPAND_NORMAL
);
5965 /* If TEMP is a VOIDmode constant, use convert_modes to make
5966 sure that we properly convert it. */
5967 if (CONSTANT_P (temp
) && GET_MODE (temp
) == VOIDmode
)
5969 temp
= convert_modes (outer_mode
, TYPE_MODE (TREE_TYPE (exp
)),
5970 temp
, SUBREG_PROMOTED_SIGN (target
));
5971 temp
= convert_modes (inner_mode
, outer_mode
, temp
,
5972 SUBREG_PROMOTED_SIGN (target
));
5975 convert_move (SUBREG_REG (target
), temp
,
5976 SUBREG_PROMOTED_SIGN (target
));
5980 else if ((TREE_CODE (exp
) == STRING_CST
5981 || (TREE_CODE (exp
) == MEM_REF
5982 && TREE_CODE (TREE_OPERAND (exp
, 0)) == ADDR_EXPR
5983 && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp
, 0), 0))
5985 && integer_zerop (TREE_OPERAND (exp
, 1))))
5986 && !nontemporal
&& !call_param_p
5989 /* Optimize initialization of an array with a STRING_CST. */
5990 HOST_WIDE_INT exp_len
, str_copy_len
;
5992 tree str
= TREE_CODE (exp
) == STRING_CST
5993 ? exp
: TREE_OPERAND (TREE_OPERAND (exp
, 0), 0);
5995 exp_len
= int_expr_size (exp
);
5999 if (TREE_STRING_LENGTH (str
) <= 0)
6002 if (can_store_by_pieces (exp_len
, string_cst_read_str
, (void *) str
,
6003 MEM_ALIGN (target
), false))
6005 store_by_pieces (target
, exp_len
, string_cst_read_str
, (void *) str
,
6006 MEM_ALIGN (target
), false, RETURN_BEGIN
);
6010 str_copy_len
= TREE_STRING_LENGTH (str
);
6011 if ((STORE_MAX_PIECES
& (STORE_MAX_PIECES
- 1)) == 0)
6013 str_copy_len
+= STORE_MAX_PIECES
- 1;
6014 str_copy_len
&= ~(STORE_MAX_PIECES
- 1);
6016 if (str_copy_len
>= exp_len
)
6019 if (!can_store_by_pieces (str_copy_len
, string_cst_read_str
,
6020 (void *) str
, MEM_ALIGN (target
), false))
6023 dest_mem
= store_by_pieces (target
, str_copy_len
, string_cst_read_str
,
6024 (void *) str
, MEM_ALIGN (target
), false,
6026 clear_storage (adjust_address_1 (dest_mem
, BLKmode
, 0, 1, 1, 0,
6027 exp_len
- str_copy_len
),
6028 GEN_INT (exp_len
- str_copy_len
), BLOCK_OP_NORMAL
);
6036 /* If we want to use a nontemporal or a reverse order store, force the
6037 value into a register first. */
6038 tmp_target
= nontemporal
|| reverse
? NULL_RTX
: target
;
6040 if (TREE_CODE (exp
) == STRING_CST
6041 && tmp_target
== target
6042 && GET_MODE (target
) == BLKmode
6043 && TYPE_MODE (TREE_TYPE (exp
)) == BLKmode
)
6045 rtx size
= expr_size (exp
);
6046 if (CONST_INT_P (size
)
6047 && size
!= const0_rtx
6049 > ((unsigned HOST_WIDE_INT
) TREE_STRING_LENGTH (exp
) + 32)))
6051 /* If the STRING_CST has much larger array type than
6052 TREE_STRING_LENGTH, only emit the TREE_STRING_LENGTH part of
6053 it into the rodata section as the code later on will use
6054 memset zero for the remainder anyway. See PR95052. */
6055 tmp_target
= NULL_RTX
;
6056 rexp
= copy_node (exp
);
6058 = build_index_type (size_int (TREE_STRING_LENGTH (exp
) - 1));
6059 TREE_TYPE (rexp
) = build_array_type (TREE_TYPE (TREE_TYPE (exp
)),
6061 shortened_string_cst
= true;
6064 temp
= expand_expr_real (rexp
, tmp_target
, GET_MODE (target
),
6066 ? EXPAND_STACK_PARM
: EXPAND_NORMAL
),
6068 if (shortened_string_cst
)
6070 gcc_assert (MEM_P (temp
));
6071 temp
= change_address (temp
, BLKmode
, NULL_RTX
);
6075 /* If TEMP is a VOIDmode constant and the mode of the type of EXP is not
6076 the same as that of TARGET, adjust the constant. This is needed, for
6077 example, in case it is a CONST_DOUBLE or CONST_WIDE_INT and we want
6078 only a word-sized value. */
6079 if (CONSTANT_P (temp
) && GET_MODE (temp
) == VOIDmode
6080 && TREE_CODE (exp
) != ERROR_MARK
6081 && GET_MODE (target
) != TYPE_MODE (TREE_TYPE (exp
)))
6083 gcc_assert (!shortened_string_cst
);
6084 if (GET_MODE_CLASS (GET_MODE (target
))
6085 != GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (exp
)))
6086 && known_eq (GET_MODE_BITSIZE (GET_MODE (target
)),
6087 GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp
)))))
6089 rtx t
= simplify_gen_subreg (GET_MODE (target
), temp
,
6090 TYPE_MODE (TREE_TYPE (exp
)), 0);
6094 if (GET_MODE (temp
) == VOIDmode
)
6095 temp
= convert_modes (GET_MODE (target
), TYPE_MODE (TREE_TYPE (exp
)),
6096 temp
, TYPE_UNSIGNED (TREE_TYPE (exp
)));
6099 /* If value was not generated in the target, store it there.
6100 Convert the value to TARGET's type first if necessary and emit the
6101 pending incrementations that have been queued when expanding EXP.
6102 Note that we cannot emit the whole queue blindly because this will
6103 effectively disable the POST_INC optimization later.
6105 If TEMP and TARGET compare equal according to rtx_equal_p, but
6106 one or both of them are volatile memory refs, we have to distinguish
6108 - expand_expr has used TARGET. In this case, we must not generate
6109 another copy. This can be detected by TARGET being equal according
6111 - expand_expr has not used TARGET - that means that the source just
6112 happens to have the same RTX form. Since temp will have been created
6113 by expand_expr, it will compare unequal according to == .
6114 We must generate a copy in this case, to reach the correct number
6115 of volatile memory references. */
6117 if ((! rtx_equal_p (temp
, target
)
6118 || (temp
!= target
&& (side_effects_p (temp
)
6119 || side_effects_p (target
))))
6120 && TREE_CODE (exp
) != ERROR_MARK
6121 /* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
6122 but TARGET is not valid memory reference, TEMP will differ
6123 from TARGET although it is really the same location. */
6125 && rtx_equal_p (alt_rtl
, target
)
6126 && !side_effects_p (alt_rtl
)
6127 && !side_effects_p (target
))
6128 /* If there's nothing to copy, don't bother. Don't call
6129 expr_size unless necessary, because some front-ends (C++)
6130 expr_size-hook must not be given objects that are not
6131 supposed to be bit-copied or bit-initialized. */
6132 && expr_size (exp
) != const0_rtx
)
6134 if (GET_MODE (temp
) != GET_MODE (target
) && GET_MODE (temp
) != VOIDmode
)
6136 gcc_assert (!shortened_string_cst
);
6137 if (GET_MODE (target
) == BLKmode
)
6139 /* Handle calls that return BLKmode values in registers. */
6140 if (REG_P (temp
) && TREE_CODE (exp
) == CALL_EXPR
)
6141 copy_blkmode_from_reg (target
, temp
, TREE_TYPE (exp
));
6143 store_bit_field (target
,
6144 rtx_to_poly_int64 (expr_size (exp
))
6146 0, 0, 0, GET_MODE (temp
), temp
, reverse
);
6149 convert_move (target
, temp
, TYPE_UNSIGNED (TREE_TYPE (exp
)));
6152 else if (GET_MODE (temp
) == BLKmode
&& TREE_CODE (exp
) == STRING_CST
)
6154 /* Handle copying a string constant into an array. The string
6155 constant may be shorter than the array. So copy just the string's
6156 actual length, and clear the rest. First get the size of the data
6157 type of the string, which is actually the size of the target. */
6158 rtx size
= expr_size (exp
);
6160 if (CONST_INT_P (size
)
6161 && INTVAL (size
) < TREE_STRING_LENGTH (exp
))
6162 emit_block_move (target
, temp
, size
,
6164 ? BLOCK_OP_CALL_PARM
: BLOCK_OP_NORMAL
));
6167 machine_mode pointer_mode
6168 = targetm
.addr_space
.pointer_mode (MEM_ADDR_SPACE (target
));
6169 machine_mode address_mode
= get_address_mode (target
);
6171 /* Compute the size of the data to copy from the string. */
6173 = size_binop_loc (loc
, MIN_EXPR
,
6174 make_tree (sizetype
, size
),
6175 size_int (TREE_STRING_LENGTH (exp
)));
6177 = expand_expr (copy_size
, NULL_RTX
, VOIDmode
,
6179 ? EXPAND_STACK_PARM
: EXPAND_NORMAL
));
6180 rtx_code_label
*label
= 0;
6182 /* Copy that much. */
6183 copy_size_rtx
= convert_to_mode (pointer_mode
, copy_size_rtx
,
6184 TYPE_UNSIGNED (sizetype
));
6185 emit_block_move (target
, temp
, copy_size_rtx
,
6187 ? BLOCK_OP_CALL_PARM
: BLOCK_OP_NORMAL
));
6189 /* Figure out how much is left in TARGET that we have to clear.
6190 Do all calculations in pointer_mode. */
6191 poly_int64 const_copy_size
;
6192 if (poly_int_rtx_p (copy_size_rtx
, &const_copy_size
))
6194 size
= plus_constant (address_mode
, size
, -const_copy_size
);
6195 target
= adjust_address (target
, BLKmode
, const_copy_size
);
6199 size
= expand_binop (TYPE_MODE (sizetype
), sub_optab
, size
,
6200 copy_size_rtx
, NULL_RTX
, 0,
6203 if (GET_MODE (copy_size_rtx
) != address_mode
)
6204 copy_size_rtx
= convert_to_mode (address_mode
,
6206 TYPE_UNSIGNED (sizetype
));
6208 target
= offset_address (target
, copy_size_rtx
,
6209 highest_pow2_factor (copy_size
));
6210 label
= gen_label_rtx ();
6211 emit_cmp_and_jump_insns (size
, const0_rtx
, LT
, NULL_RTX
,
6212 GET_MODE (size
), 0, label
);
6215 if (size
!= const0_rtx
)
6216 clear_storage (target
, size
, BLOCK_OP_NORMAL
);
6222 else if (shortened_string_cst
)
6224 /* Handle calls that return values in multiple non-contiguous locations.
6225 The Irix 6 ABI has examples of this. */
6226 else if (GET_CODE (target
) == PARALLEL
)
6228 if (GET_CODE (temp
) == PARALLEL
)
6229 emit_group_move (target
, temp
);
6231 emit_group_load (target
, temp
, TREE_TYPE (exp
),
6232 int_size_in_bytes (TREE_TYPE (exp
)));
6234 else if (GET_CODE (temp
) == PARALLEL
)
6235 emit_group_store (target
, temp
, TREE_TYPE (exp
),
6236 int_size_in_bytes (TREE_TYPE (exp
)));
6237 else if (GET_MODE (temp
) == BLKmode
)
6238 emit_block_move (target
, temp
, expr_size (exp
),
6240 ? BLOCK_OP_CALL_PARM
: BLOCK_OP_NORMAL
));
6241 /* If we emit a nontemporal store, there is nothing else to do. */
6242 else if (nontemporal
&& emit_storent_insn (target
, temp
))
6247 temp
= flip_storage_order (GET_MODE (target
), temp
);
6248 temp
= force_operand (temp
, target
);
6250 emit_move_insn (target
, temp
);
6254 gcc_assert (!shortened_string_cst
);
6259 /* Return true if field F of structure TYPE is a flexible array. */
6262 flexible_array_member_p (const_tree f
, const_tree type
)
6267 return (DECL_CHAIN (f
) == NULL
6268 && TREE_CODE (tf
) == ARRAY_TYPE
6270 && TYPE_MIN_VALUE (TYPE_DOMAIN (tf
))
6271 && integer_zerop (TYPE_MIN_VALUE (TYPE_DOMAIN (tf
)))
6272 && !TYPE_MAX_VALUE (TYPE_DOMAIN (tf
))
6273 && int_size_in_bytes (type
) >= 0);
6276 /* If FOR_CTOR_P, return the number of top-level elements that a constructor
6277 must have in order for it to completely initialize a value of type TYPE.
6278 Return -1 if the number isn't known.
6280 If !FOR_CTOR_P, return an estimate of the number of scalars in TYPE. */
6282 static HOST_WIDE_INT
6283 count_type_elements (const_tree type
, bool for_ctor_p
)
6285 switch (TREE_CODE (type
))
6291 nelts
= array_type_nelts (type
);
6292 if (nelts
&& tree_fits_uhwi_p (nelts
))
6294 unsigned HOST_WIDE_INT n
;
6296 n
= tree_to_uhwi (nelts
) + 1;
6297 if (n
== 0 || for_ctor_p
)
6300 return n
* count_type_elements (TREE_TYPE (type
), false);
6302 return for_ctor_p
? -1 : 1;
6307 unsigned HOST_WIDE_INT n
;
6311 for (f
= TYPE_FIELDS (type
); f
; f
= DECL_CHAIN (f
))
6312 if (TREE_CODE (f
) == FIELD_DECL
)
6315 n
+= count_type_elements (TREE_TYPE (f
), false);
6316 else if (!flexible_array_member_p (f
, type
))
6317 /* Don't count flexible arrays, which are not supposed
6318 to be initialized. */
6326 case QUAL_UNION_TYPE
:
6331 gcc_assert (!for_ctor_p
);
6332 /* Estimate the number of scalars in each field and pick the
6333 maximum. Other estimates would do instead; the idea is simply
6334 to make sure that the estimate is not sensitive to the ordering
6337 for (f
= TYPE_FIELDS (type
); f
; f
= DECL_CHAIN (f
))
6338 if (TREE_CODE (f
) == FIELD_DECL
)
6340 m
= count_type_elements (TREE_TYPE (f
), false);
6341 /* If the field doesn't span the whole union, add an extra
6342 scalar for the rest. */
6343 if (simple_cst_equal (TYPE_SIZE (TREE_TYPE (f
)),
6344 TYPE_SIZE (type
)) != 1)
6357 unsigned HOST_WIDE_INT nelts
;
6358 if (TYPE_VECTOR_SUBPARTS (type
).is_constant (&nelts
))
6366 case FIXED_POINT_TYPE
:
6371 case REFERENCE_TYPE
:
6388 /* Helper for categorize_ctor_elements. Identical interface. */
6391 categorize_ctor_elements_1 (const_tree ctor
, HOST_WIDE_INT
*p_nz_elts
,
6392 HOST_WIDE_INT
*p_unique_nz_elts
,
6393 HOST_WIDE_INT
*p_init_elts
, bool *p_complete
)
6395 unsigned HOST_WIDE_INT idx
;
6396 HOST_WIDE_INT nz_elts
, unique_nz_elts
, init_elts
, num_fields
;
6397 tree value
, purpose
, elt_type
;
6399 /* Whether CTOR is a valid constant initializer, in accordance with what
6400 initializer_constant_valid_p does. If inferred from the constructor
6401 elements, true until proven otherwise. */
6402 bool const_from_elts_p
= constructor_static_from_elts_p (ctor
);
6403 bool const_p
= const_from_elts_p
? true : TREE_STATIC (ctor
);
6409 elt_type
= NULL_TREE
;
6411 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor
), idx
, purpose
, value
)
6413 HOST_WIDE_INT mult
= 1;
6415 if (purpose
&& TREE_CODE (purpose
) == RANGE_EXPR
)
6417 tree lo_index
= TREE_OPERAND (purpose
, 0);
6418 tree hi_index
= TREE_OPERAND (purpose
, 1);
6420 if (tree_fits_uhwi_p (lo_index
) && tree_fits_uhwi_p (hi_index
))
6421 mult
= (tree_to_uhwi (hi_index
)
6422 - tree_to_uhwi (lo_index
) + 1);
6425 elt_type
= TREE_TYPE (value
);
6427 switch (TREE_CODE (value
))
6431 HOST_WIDE_INT nz
= 0, unz
= 0, ic
= 0;
6433 bool const_elt_p
= categorize_ctor_elements_1 (value
, &nz
, &unz
,
6436 nz_elts
+= mult
* nz
;
6437 unique_nz_elts
+= unz
;
6438 init_elts
+= mult
* ic
;
6440 if (const_from_elts_p
&& const_p
)
6441 const_p
= const_elt_p
;
6448 if (!initializer_zerop (value
))
6457 nz_elts
+= mult
* TREE_STRING_LENGTH (value
);
6458 unique_nz_elts
+= TREE_STRING_LENGTH (value
);
6459 init_elts
+= mult
* TREE_STRING_LENGTH (value
);
6463 if (!initializer_zerop (TREE_REALPART (value
)))
6468 if (!initializer_zerop (TREE_IMAGPART (value
)))
6473 init_elts
+= 2 * mult
;
6478 /* We can only construct constant-length vectors using
6480 unsigned int nunits
= VECTOR_CST_NELTS (value
).to_constant ();
6481 for (unsigned int i
= 0; i
< nunits
; ++i
)
6483 tree v
= VECTOR_CST_ELT (value
, i
);
6484 if (!initializer_zerop (v
))
6496 HOST_WIDE_INT tc
= count_type_elements (elt_type
, false);
6497 nz_elts
+= mult
* tc
;
6498 unique_nz_elts
+= tc
;
6499 init_elts
+= mult
* tc
;
6501 if (const_from_elts_p
&& const_p
)
6503 = initializer_constant_valid_p (value
,
6505 TYPE_REVERSE_STORAGE_ORDER
6513 if (*p_complete
&& !complete_ctor_at_level_p (TREE_TYPE (ctor
),
6514 num_fields
, elt_type
))
6515 *p_complete
= false;
6517 *p_nz_elts
+= nz_elts
;
6518 *p_unique_nz_elts
+= unique_nz_elts
;
6519 *p_init_elts
+= init_elts
;
6524 /* Examine CTOR to discover:
6525 * how many scalar fields are set to nonzero values,
6526 and place it in *P_NZ_ELTS;
6527 * the same, but counting RANGE_EXPRs as multiplier of 1 instead of
6528 high - low + 1 (this can be useful for callers to determine ctors
6529 that could be cheaply initialized with - perhaps nested - loops
6530 compared to copied from huge read-only data),
6531 and place it in *P_UNIQUE_NZ_ELTS;
6532 * how many scalar fields in total are in CTOR,
6533 and place it in *P_ELT_COUNT.
6534 * whether the constructor is complete -- in the sense that every
6535 meaningful byte is explicitly given a value --
6536 and place it in *P_COMPLETE.
6538 Return whether or not CTOR is a valid static constant initializer, the same
6539 as "initializer_constant_valid_p (CTOR, TREE_TYPE (CTOR)) != 0". */
6542 categorize_ctor_elements (const_tree ctor
, HOST_WIDE_INT
*p_nz_elts
,
6543 HOST_WIDE_INT
*p_unique_nz_elts
,
6544 HOST_WIDE_INT
*p_init_elts
, bool *p_complete
)
6547 *p_unique_nz_elts
= 0;
6551 return categorize_ctor_elements_1 (ctor
, p_nz_elts
, p_unique_nz_elts
,
6552 p_init_elts
, p_complete
);
6555 /* TYPE is initialized by a constructor with NUM_ELTS elements, the last
6556 of which had type LAST_TYPE. Each element was itself a complete
6557 initializer, in the sense that every meaningful byte was explicitly
6558 given a value. Return true if the same is true for the constructor
6562 complete_ctor_at_level_p (const_tree type
, HOST_WIDE_INT num_elts
,
6563 const_tree last_type
)
6565 if (TREE_CODE (type
) == UNION_TYPE
6566 || TREE_CODE (type
) == QUAL_UNION_TYPE
)
6571 gcc_assert (num_elts
== 1 && last_type
);
6573 /* ??? We could look at each element of the union, and find the
6574 largest element. Which would avoid comparing the size of the
6575 initialized element against any tail padding in the union.
6576 Doesn't seem worth the effort... */
6577 return simple_cst_equal (TYPE_SIZE (type
), TYPE_SIZE (last_type
)) == 1;
6580 return count_type_elements (type
, true) == num_elts
;
6583 /* Return 1 if EXP contains mostly (3/4) zeros. */
6586 mostly_zeros_p (const_tree exp
)
6588 if (TREE_CODE (exp
) == CONSTRUCTOR
)
6590 HOST_WIDE_INT nz_elts
, unz_elts
, init_elts
;
6593 categorize_ctor_elements (exp
, &nz_elts
, &unz_elts
, &init_elts
,
6595 return !complete_p
|| nz_elts
< init_elts
/ 4;
6598 return initializer_zerop (exp
);
6601 /* Return 1 if EXP contains all zeros. */
6604 all_zeros_p (const_tree exp
)
6606 if (TREE_CODE (exp
) == CONSTRUCTOR
)
6608 HOST_WIDE_INT nz_elts
, unz_elts
, init_elts
;
6611 categorize_ctor_elements (exp
, &nz_elts
, &unz_elts
, &init_elts
,
6613 return nz_elts
== 0;
6616 return initializer_zerop (exp
);
6619 /* Helper function for store_constructor.
6620 TARGET, BITSIZE, BITPOS, MODE, EXP are as for store_field.
6621 CLEARED is as for store_constructor.
6622 ALIAS_SET is the alias set to use for any stores.
6623 If REVERSE is true, the store is to be done in reverse order.
6625 This provides a recursive shortcut back to store_constructor when it isn't
6626 necessary to go through store_field. This is so that we can pass through
6627 the cleared field to let store_constructor know that we may not have to
6628 clear a substructure if the outer structure has already been cleared. */
6631 store_constructor_field (rtx target
, poly_uint64 bitsize
, poly_int64 bitpos
,
6632 poly_uint64 bitregion_start
,
6633 poly_uint64 bitregion_end
,
6635 tree exp
, int cleared
,
6636 alias_set_type alias_set
, bool reverse
)
6639 poly_uint64 bytesize
;
6640 if (TREE_CODE (exp
) == CONSTRUCTOR
6641 /* We can only call store_constructor recursively if the size and
6642 bit position are on a byte boundary. */
6643 && multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
6644 && maybe_ne (bitsize
, 0U)
6645 && multiple_p (bitsize
, BITS_PER_UNIT
, &bytesize
)
6646 /* If we have a nonzero bitpos for a register target, then we just
6647 let store_field do the bitfield handling. This is unlikely to
6648 generate unnecessary clear instructions anyways. */
6649 && (known_eq (bitpos
, 0) || MEM_P (target
)))
6653 machine_mode target_mode
= GET_MODE (target
);
6654 if (target_mode
!= BLKmode
6655 && !multiple_p (bitpos
, GET_MODE_ALIGNMENT (target_mode
)))
6656 target_mode
= BLKmode
;
6657 target
= adjust_address (target
, target_mode
, bytepos
);
6661 /* Update the alias set, if required. */
6662 if (MEM_P (target
) && ! MEM_KEEP_ALIAS_SET_P (target
)
6663 && MEM_ALIAS_SET (target
) != 0)
6665 target
= copy_rtx (target
);
6666 set_mem_alias_set (target
, alias_set
);
6669 store_constructor (exp
, target
, cleared
, bytesize
, reverse
);
6672 store_field (target
, bitsize
, bitpos
, bitregion_start
, bitregion_end
, mode
,
6673 exp
, alias_set
, false, reverse
);
6677 /* Returns the number of FIELD_DECLs in TYPE. */
6680 fields_length (const_tree type
)
6682 tree t
= TYPE_FIELDS (type
);
6685 for (; t
; t
= DECL_CHAIN (t
))
6686 if (TREE_CODE (t
) == FIELD_DECL
)
6693 /* Store the value of constructor EXP into the rtx TARGET.
6694 TARGET is either a REG or a MEM; we know it cannot conflict, since
6695 safe_from_p has been called.
6696 CLEARED is true if TARGET is known to have been zero'd.
6697 SIZE is the number of bytes of TARGET we are allowed to modify: this
6698 may not be the same as the size of EXP if we are assigning to a field
6699 which has been packed to exclude padding bits.
6700 If REVERSE is true, the store is to be done in reverse order. */
6703 store_constructor (tree exp
, rtx target
, int cleared
, poly_int64 size
,
6706 tree type
= TREE_TYPE (exp
);
6707 HOST_WIDE_INT exp_size
= int_size_in_bytes (type
);
6708 poly_int64 bitregion_end
= known_gt (size
, 0) ? size
* BITS_PER_UNIT
- 1 : 0;
6710 switch (TREE_CODE (type
))
6714 case QUAL_UNION_TYPE
:
6716 unsigned HOST_WIDE_INT idx
;
6719 /* The storage order is specified for every aggregate type. */
6720 reverse
= TYPE_REVERSE_STORAGE_ORDER (type
);
6722 /* If size is zero or the target is already cleared, do nothing. */
6723 if (known_eq (size
, 0) || cleared
)
6725 /* We either clear the aggregate or indicate the value is dead. */
6726 else if ((TREE_CODE (type
) == UNION_TYPE
6727 || TREE_CODE (type
) == QUAL_UNION_TYPE
)
6728 && ! CONSTRUCTOR_ELTS (exp
))
6729 /* If the constructor is empty, clear the union. */
6731 clear_storage (target
, expr_size (exp
), BLOCK_OP_NORMAL
);
6735 /* If we are building a static constructor into a register,
6736 set the initial value as zero so we can fold the value into
6737 a constant. But if more than one register is involved,
6738 this probably loses. */
6739 else if (REG_P (target
) && TREE_STATIC (exp
)
6740 && known_le (GET_MODE_SIZE (GET_MODE (target
)),
6741 REGMODE_NATURAL_SIZE (GET_MODE (target
))))
6743 emit_move_insn (target
, CONST0_RTX (GET_MODE (target
)));
6747 /* If the constructor has fewer fields than the structure or
6748 if we are initializing the structure to mostly zeros, clear
6749 the whole structure first. Don't do this if TARGET is a
6750 register whose mode size isn't equal to SIZE since
6751 clear_storage can't handle this case. */
6752 else if (known_size_p (size
)
6753 && (((int) CONSTRUCTOR_NELTS (exp
) != fields_length (type
))
6754 || mostly_zeros_p (exp
))
6756 || known_eq (GET_MODE_SIZE (GET_MODE (target
)), size
)))
6758 clear_storage (target
, gen_int_mode (size
, Pmode
),
6763 if (REG_P (target
) && !cleared
)
6764 emit_clobber (target
);
6766 /* Store each element of the constructor into the
6767 corresponding field of TARGET. */
6768 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp
), idx
, field
, value
)
6771 HOST_WIDE_INT bitsize
;
6772 HOST_WIDE_INT bitpos
= 0;
6774 rtx to_rtx
= target
;
6776 /* Just ignore missing fields. We cleared the whole
6777 structure, above, if any fields are missing. */
6781 if (cleared
&& initializer_zerop (value
))
6784 if (tree_fits_uhwi_p (DECL_SIZE (field
)))
6785 bitsize
= tree_to_uhwi (DECL_SIZE (field
));
6789 mode
= DECL_MODE (field
);
6790 if (DECL_BIT_FIELD (field
))
6793 offset
= DECL_FIELD_OFFSET (field
);
6794 if (tree_fits_shwi_p (offset
)
6795 && tree_fits_shwi_p (bit_position (field
)))
6797 bitpos
= int_bit_position (field
);
6803 /* If this initializes a field that is smaller than a
6804 word, at the start of a word, try to widen it to a full
6805 word. This special case allows us to output C++ member
6806 function initializations in a form that the optimizers
6808 if (WORD_REGISTER_OPERATIONS
6810 && bitsize
< BITS_PER_WORD
6811 && bitpos
% BITS_PER_WORD
== 0
6812 && GET_MODE_CLASS (mode
) == MODE_INT
6813 && TREE_CODE (value
) == INTEGER_CST
6815 && bitpos
+ BITS_PER_WORD
<= exp_size
* BITS_PER_UNIT
)
6817 type
= TREE_TYPE (value
);
6819 if (TYPE_PRECISION (type
) < BITS_PER_WORD
)
6821 type
= lang_hooks
.types
.type_for_mode
6822 (word_mode
, TYPE_UNSIGNED (type
));
6823 value
= fold_convert (type
, value
);
6824 /* Make sure the bits beyond the original bitsize are zero
6825 so that we can correctly avoid extra zeroing stores in
6826 later constructor elements. */
6828 = wide_int_to_tree (type
, wi::mask (bitsize
, false,
6830 value
= fold_build2 (BIT_AND_EXPR
, type
, value
, bitsize_mask
);
6833 if (BYTES_BIG_ENDIAN
)
6835 = fold_build2 (LSHIFT_EXPR
, type
, value
,
6836 build_int_cst (type
,
6837 BITS_PER_WORD
- bitsize
));
6838 bitsize
= BITS_PER_WORD
;
6842 if (MEM_P (to_rtx
) && !MEM_KEEP_ALIAS_SET_P (to_rtx
)
6843 && DECL_NONADDRESSABLE_P (field
))
6845 to_rtx
= copy_rtx (to_rtx
);
6846 MEM_KEEP_ALIAS_SET_P (to_rtx
) = 1;
6849 store_constructor_field (to_rtx
, bitsize
, bitpos
,
6850 0, bitregion_end
, mode
,
6852 get_alias_set (TREE_TYPE (field
)),
6860 unsigned HOST_WIDE_INT i
;
6863 tree elttype
= TREE_TYPE (type
);
6865 HOST_WIDE_INT minelt
= 0;
6866 HOST_WIDE_INT maxelt
= 0;
6868 /* The storage order is specified for every aggregate type. */
6869 reverse
= TYPE_REVERSE_STORAGE_ORDER (type
);
6871 domain
= TYPE_DOMAIN (type
);
6872 const_bounds_p
= (TYPE_MIN_VALUE (domain
)
6873 && TYPE_MAX_VALUE (domain
)
6874 && tree_fits_shwi_p (TYPE_MIN_VALUE (domain
))
6875 && tree_fits_shwi_p (TYPE_MAX_VALUE (domain
)));
6877 /* If we have constant bounds for the range of the type, get them. */
6880 minelt
= tree_to_shwi (TYPE_MIN_VALUE (domain
));
6881 maxelt
= tree_to_shwi (TYPE_MAX_VALUE (domain
));
6884 /* If the constructor has fewer elements than the array, clear
6885 the whole array first. Similarly if this is static
6886 constructor of a non-BLKmode object. */
6889 else if (REG_P (target
) && TREE_STATIC (exp
))
6893 unsigned HOST_WIDE_INT idx
;
6894 HOST_WIDE_INT count
= 0, zero_count
= 0;
6895 need_to_clear
= ! const_bounds_p
;
6897 /* This loop is a more accurate version of the loop in
6898 mostly_zeros_p (it handles RANGE_EXPR in an index). It
6899 is also needed to check for missing elements. */
6900 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp
), idx
, index
, value
)
6902 HOST_WIDE_INT this_node_count
;
6907 if (index
!= NULL_TREE
&& TREE_CODE (index
) == RANGE_EXPR
)
6909 tree lo_index
= TREE_OPERAND (index
, 0);
6910 tree hi_index
= TREE_OPERAND (index
, 1);
6912 if (! tree_fits_uhwi_p (lo_index
)
6913 || ! tree_fits_uhwi_p (hi_index
))
6919 this_node_count
= (tree_to_uhwi (hi_index
)
6920 - tree_to_uhwi (lo_index
) + 1);
6923 this_node_count
= 1;
6925 count
+= this_node_count
;
6926 if (mostly_zeros_p (value
))
6927 zero_count
+= this_node_count
;
6930 /* Clear the entire array first if there are any missing
6931 elements, or if the incidence of zero elements is >=
6934 && (count
< maxelt
- minelt
+ 1
6935 || 4 * zero_count
>= 3 * count
))
6939 if (need_to_clear
&& maybe_gt (size
, 0))
6942 emit_move_insn (target
, CONST0_RTX (GET_MODE (target
)));
6944 clear_storage (target
, gen_int_mode (size
, Pmode
),
6949 if (!cleared
&& REG_P (target
))
6950 /* Inform later passes that the old value is dead. */
6951 emit_clobber (target
);
6953 /* Store each element of the constructor into the
6954 corresponding element of TARGET, determined by counting the
6956 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp
), i
, index
, value
)
6960 HOST_WIDE_INT bitpos
;
6961 rtx xtarget
= target
;
6963 if (cleared
&& initializer_zerop (value
))
6966 mode
= TYPE_MODE (elttype
);
6967 if (mode
!= BLKmode
)
6968 bitsize
= GET_MODE_BITSIZE (mode
);
6969 else if (!poly_int_tree_p (TYPE_SIZE (elttype
), &bitsize
))
6972 if (index
!= NULL_TREE
&& TREE_CODE (index
) == RANGE_EXPR
)
6974 tree lo_index
= TREE_OPERAND (index
, 0);
6975 tree hi_index
= TREE_OPERAND (index
, 1);
6976 rtx index_r
, pos_rtx
;
6977 HOST_WIDE_INT lo
, hi
, count
;
6980 /* If the range is constant and "small", unroll the loop. */
6982 && tree_fits_shwi_p (lo_index
)
6983 && tree_fits_shwi_p (hi_index
)
6984 && (lo
= tree_to_shwi (lo_index
),
6985 hi
= tree_to_shwi (hi_index
),
6986 count
= hi
- lo
+ 1,
6989 || (tree_fits_uhwi_p (TYPE_SIZE (elttype
))
6990 && (tree_to_uhwi (TYPE_SIZE (elttype
)) * count
6993 lo
-= minelt
; hi
-= minelt
;
6994 for (; lo
<= hi
; lo
++)
6996 bitpos
= lo
* tree_to_shwi (TYPE_SIZE (elttype
));
6999 && !MEM_KEEP_ALIAS_SET_P (target
)
7000 && TREE_CODE (type
) == ARRAY_TYPE
7001 && TYPE_NONALIASED_COMPONENT (type
))
7003 target
= copy_rtx (target
);
7004 MEM_KEEP_ALIAS_SET_P (target
) = 1;
7007 store_constructor_field
7008 (target
, bitsize
, bitpos
, 0, bitregion_end
,
7009 mode
, value
, cleared
,
7010 get_alias_set (elttype
), reverse
);
7015 rtx_code_label
*loop_start
= gen_label_rtx ();
7016 rtx_code_label
*loop_end
= gen_label_rtx ();
7019 expand_normal (hi_index
);
7021 index
= build_decl (EXPR_LOCATION (exp
),
7022 VAR_DECL
, NULL_TREE
, domain
);
7023 index_r
= gen_reg_rtx (promote_decl_mode (index
, NULL
));
7024 SET_DECL_RTL (index
, index_r
);
7025 store_expr (lo_index
, index_r
, 0, false, reverse
);
7027 /* Build the head of the loop. */
7028 do_pending_stack_adjust ();
7029 emit_label (loop_start
);
7031 /* Assign value to element index. */
7033 fold_convert (ssizetype
,
7034 fold_build2 (MINUS_EXPR
,
7037 TYPE_MIN_VALUE (domain
)));
7040 size_binop (MULT_EXPR
, position
,
7041 fold_convert (ssizetype
,
7042 TYPE_SIZE_UNIT (elttype
)));
7044 pos_rtx
= expand_normal (position
);
7045 xtarget
= offset_address (target
, pos_rtx
,
7046 highest_pow2_factor (position
));
7047 xtarget
= adjust_address (xtarget
, mode
, 0);
7048 if (TREE_CODE (value
) == CONSTRUCTOR
)
7049 store_constructor (value
, xtarget
, cleared
,
7050 exact_div (bitsize
, BITS_PER_UNIT
),
7053 store_expr (value
, xtarget
, 0, false, reverse
);
7055 /* Generate a conditional jump to exit the loop. */
7056 exit_cond
= build2 (LT_EXPR
, integer_type_node
,
7058 jumpif (exit_cond
, loop_end
,
7059 profile_probability::uninitialized ());
7061 /* Update the loop counter, and jump to the head of
7063 expand_assignment (index
,
7064 build2 (PLUS_EXPR
, TREE_TYPE (index
),
7065 index
, integer_one_node
),
7068 emit_jump (loop_start
);
7070 /* Build the end of the loop. */
7071 emit_label (loop_end
);
7074 else if ((index
!= 0 && ! tree_fits_shwi_p (index
))
7075 || ! tree_fits_uhwi_p (TYPE_SIZE (elttype
)))
7080 index
= ssize_int (1);
7083 index
= fold_convert (ssizetype
,
7084 fold_build2 (MINUS_EXPR
,
7087 TYPE_MIN_VALUE (domain
)));
7090 size_binop (MULT_EXPR
, index
,
7091 fold_convert (ssizetype
,
7092 TYPE_SIZE_UNIT (elttype
)));
7093 xtarget
= offset_address (target
,
7094 expand_normal (position
),
7095 highest_pow2_factor (position
));
7096 xtarget
= adjust_address (xtarget
, mode
, 0);
7097 store_expr (value
, xtarget
, 0, false, reverse
);
7102 bitpos
= ((tree_to_shwi (index
) - minelt
)
7103 * tree_to_uhwi (TYPE_SIZE (elttype
)));
7105 bitpos
= (i
* tree_to_uhwi (TYPE_SIZE (elttype
)));
7107 if (MEM_P (target
) && !MEM_KEEP_ALIAS_SET_P (target
)
7108 && TREE_CODE (type
) == ARRAY_TYPE
7109 && TYPE_NONALIASED_COMPONENT (type
))
7111 target
= copy_rtx (target
);
7112 MEM_KEEP_ALIAS_SET_P (target
) = 1;
7114 store_constructor_field (target
, bitsize
, bitpos
, 0,
7115 bitregion_end
, mode
, value
,
7116 cleared
, get_alias_set (elttype
),
7125 unsigned HOST_WIDE_INT idx
;
7126 constructor_elt
*ce
;
7129 insn_code icode
= CODE_FOR_nothing
;
7131 tree elttype
= TREE_TYPE (type
);
7132 int elt_size
= vector_element_bits (type
);
7133 machine_mode eltmode
= TYPE_MODE (elttype
);
7134 HOST_WIDE_INT bitsize
;
7135 HOST_WIDE_INT bitpos
;
7136 rtvec vector
= NULL
;
7138 unsigned HOST_WIDE_INT const_n_elts
;
7139 alias_set_type alias
;
7140 bool vec_vec_init_p
= false;
7141 machine_mode mode
= GET_MODE (target
);
7143 gcc_assert (eltmode
!= BLKmode
);
7145 /* Try using vec_duplicate_optab for uniform vectors. */
7146 if (!TREE_SIDE_EFFECTS (exp
)
7147 && VECTOR_MODE_P (mode
)
7148 && eltmode
== GET_MODE_INNER (mode
)
7149 && ((icode
= optab_handler (vec_duplicate_optab
, mode
))
7150 != CODE_FOR_nothing
)
7151 && (elt
= uniform_vector_p (exp
))
7152 && !VECTOR_TYPE_P (TREE_TYPE (elt
)))
7154 class expand_operand ops
[2];
7155 create_output_operand (&ops
[0], target
, mode
);
7156 create_input_operand (&ops
[1], expand_normal (elt
), eltmode
);
7157 expand_insn (icode
, 2, ops
);
7158 if (!rtx_equal_p (target
, ops
[0].value
))
7159 emit_move_insn (target
, ops
[0].value
);
7163 n_elts
= TYPE_VECTOR_SUBPARTS (type
);
7165 && VECTOR_MODE_P (mode
)
7166 && n_elts
.is_constant (&const_n_elts
))
7168 machine_mode emode
= eltmode
;
7169 bool vector_typed_elts_p
= false;
7171 if (CONSTRUCTOR_NELTS (exp
)
7172 && (TREE_CODE (TREE_TYPE (CONSTRUCTOR_ELT (exp
, 0)->value
))
7175 tree etype
= TREE_TYPE (CONSTRUCTOR_ELT (exp
, 0)->value
);
7176 gcc_assert (known_eq (CONSTRUCTOR_NELTS (exp
)
7177 * TYPE_VECTOR_SUBPARTS (etype
),
7179 emode
= TYPE_MODE (etype
);
7180 vector_typed_elts_p
= true;
7182 icode
= convert_optab_handler (vec_init_optab
, mode
, emode
);
7183 if (icode
!= CODE_FOR_nothing
)
7185 unsigned int n
= const_n_elts
;
7187 if (vector_typed_elts_p
)
7189 n
= CONSTRUCTOR_NELTS (exp
);
7190 vec_vec_init_p
= true;
7192 vector
= rtvec_alloc (n
);
7193 for (unsigned int k
= 0; k
< n
; k
++)
7194 RTVEC_ELT (vector
, k
) = CONST0_RTX (emode
);
7198 /* Compute the size of the elements in the CTOR. It differs
7199 from the size of the vector type elements only when the
7200 CTOR elements are vectors themselves. */
7201 tree val_type
= (CONSTRUCTOR_NELTS (exp
) != 0
7202 ? TREE_TYPE (CONSTRUCTOR_ELT (exp
, 0)->value
)
7204 if (VECTOR_TYPE_P (val_type
))
7205 bitsize
= tree_to_uhwi (TYPE_SIZE (val_type
));
7209 /* If the constructor has fewer elements than the vector,
7210 clear the whole array first. Similarly if this is static
7211 constructor of a non-BLKmode object. */
7214 else if (REG_P (target
) && TREE_STATIC (exp
))
7218 unsigned HOST_WIDE_INT count
= 0, zero_count
= 0;
7221 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp
), idx
, value
)
7223 int n_elts_here
= bitsize
/ elt_size
;
7224 count
+= n_elts_here
;
7225 if (mostly_zeros_p (value
))
7226 zero_count
+= n_elts_here
;
7229 /* Clear the entire vector first if there are any missing elements,
7230 or if the incidence of zero elements is >= 75%. */
7231 need_to_clear
= (maybe_lt (count
, n_elts
)
7232 || 4 * zero_count
>= 3 * count
);
7235 if (need_to_clear
&& maybe_gt (size
, 0) && !vector
)
7238 emit_move_insn (target
, CONST0_RTX (mode
));
7240 clear_storage (target
, gen_int_mode (size
, Pmode
),
7245 /* Inform later passes that the old value is dead. */
7246 if (!cleared
&& !vector
&& REG_P (target
))
7247 emit_move_insn (target
, CONST0_RTX (mode
));
7250 alias
= MEM_ALIAS_SET (target
);
7252 alias
= get_alias_set (elttype
);
7254 /* Store each element of the constructor into the corresponding
7255 element of TARGET, determined by counting the elements. */
7256 for (idx
= 0, i
= 0;
7257 vec_safe_iterate (CONSTRUCTOR_ELTS (exp
), idx
, &ce
);
7258 idx
++, i
+= bitsize
/ elt_size
)
7260 HOST_WIDE_INT eltpos
;
7261 tree value
= ce
->value
;
7263 if (cleared
&& initializer_zerop (value
))
7267 eltpos
= tree_to_uhwi (ce
->index
);
7275 gcc_assert (ce
->index
== NULL_TREE
);
7276 gcc_assert (TREE_CODE (TREE_TYPE (value
)) == VECTOR_TYPE
);
7280 gcc_assert (TREE_CODE (TREE_TYPE (value
)) != VECTOR_TYPE
);
7281 RTVEC_ELT (vector
, eltpos
) = expand_normal (value
);
7285 machine_mode value_mode
7286 = (TREE_CODE (TREE_TYPE (value
)) == VECTOR_TYPE
7287 ? TYPE_MODE (TREE_TYPE (value
)) : eltmode
);
7288 bitpos
= eltpos
* elt_size
;
7289 store_constructor_field (target
, bitsize
, bitpos
, 0,
7290 bitregion_end
, value_mode
,
7291 value
, cleared
, alias
, reverse
);
7296 emit_insn (GEN_FCN (icode
) (target
,
7297 gen_rtx_PARALLEL (mode
, vector
)));
7306 /* Store the value of EXP (an expression tree)
7307 into a subfield of TARGET which has mode MODE and occupies
7308 BITSIZE bits, starting BITPOS bits from the start of TARGET.
7309 If MODE is VOIDmode, it means that we are storing into a bit-field.
7311 BITREGION_START is bitpos of the first bitfield in this region.
7312 BITREGION_END is the bitpos of the ending bitfield in this region.
7313 These two fields are 0, if the C++ memory model does not apply,
7314 or we are not interested in keeping track of bitfield regions.
7316 Always return const0_rtx unless we have something particular to
7319 ALIAS_SET is the alias set for the destination. This value will
7320 (in general) be different from that for TARGET, since TARGET is a
7321 reference to the containing structure.
7323 If NONTEMPORAL is true, try generating a nontemporal store.
7325 If REVERSE is true, the store is to be done in reverse order. */
7328 store_field (rtx target
, poly_int64 bitsize
, poly_int64 bitpos
,
7329 poly_uint64 bitregion_start
, poly_uint64 bitregion_end
,
7330 machine_mode mode
, tree exp
,
7331 alias_set_type alias_set
, bool nontemporal
, bool reverse
)
7333 if (TREE_CODE (exp
) == ERROR_MARK
)
7336 /* If we have nothing to store, do nothing unless the expression has
7337 side-effects. Don't do that for zero sized addressable lhs of
7339 if (known_eq (bitsize
, 0)
7340 && (!TREE_ADDRESSABLE (TREE_TYPE (exp
))
7341 || TREE_CODE (exp
) != CALL_EXPR
))
7342 return expand_expr (exp
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
7344 if (GET_CODE (target
) == CONCAT
)
7346 /* We're storing into a struct containing a single __complex. */
7348 gcc_assert (known_eq (bitpos
, 0));
7349 return store_expr (exp
, target
, 0, nontemporal
, reverse
);
7352 /* If the structure is in a register or if the component
7353 is a bit field, we cannot use addressing to access it.
7354 Use bit-field techniques or SUBREG to store in it. */
7356 poly_int64 decl_bitsize
;
7357 if (mode
== VOIDmode
7358 || (mode
!= BLKmode
&& ! direct_store
[(int) mode
]
7359 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_INT
7360 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_FLOAT
)
7362 || GET_CODE (target
) == SUBREG
7363 /* If the field isn't aligned enough to store as an ordinary memref,
7364 store it as a bit field. */
7366 && ((((MEM_ALIGN (target
) < GET_MODE_ALIGNMENT (mode
))
7367 || !multiple_p (bitpos
, GET_MODE_ALIGNMENT (mode
)))
7368 && targetm
.slow_unaligned_access (mode
, MEM_ALIGN (target
)))
7369 || !multiple_p (bitpos
, BITS_PER_UNIT
)))
7370 || (known_size_p (bitsize
)
7372 && maybe_gt (GET_MODE_BITSIZE (mode
), bitsize
))
7373 /* If the RHS and field are a constant size and the size of the
7374 RHS isn't the same size as the bitfield, we must use bitfield
7376 || (known_size_p (bitsize
)
7377 && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp
)))
7378 && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp
))),
7380 /* Except for initialization of full bytes from a CONSTRUCTOR, which
7381 we will handle specially below. */
7382 && !(TREE_CODE (exp
) == CONSTRUCTOR
7383 && multiple_p (bitsize
, BITS_PER_UNIT
))
7384 /* And except for bitwise copying of TREE_ADDRESSABLE types,
7385 where the FIELD_DECL has the right bitsize, but TREE_TYPE (exp)
7386 includes some extra padding. store_expr / expand_expr will in
7387 that case call get_inner_reference that will have the bitsize
7388 we check here and thus the block move will not clobber the
7389 padding that shouldn't be clobbered. In the future we could
7390 replace the TREE_ADDRESSABLE check with a check that
7391 get_base_address needs to live in memory. */
7392 && (!TREE_ADDRESSABLE (TREE_TYPE (exp
))
7393 || TREE_CODE (exp
) != COMPONENT_REF
7394 || !multiple_p (bitsize
, BITS_PER_UNIT
)
7395 || !multiple_p (bitpos
, BITS_PER_UNIT
)
7396 || !poly_int_tree_p (DECL_SIZE (TREE_OPERAND (exp
, 1)),
7398 || maybe_ne (decl_bitsize
, bitsize
))
7399 /* A call with an addressable return type and return-slot
7400 optimization must not need bitfield operations but we must
7401 pass down the original target. */
7402 && (TREE_CODE (exp
) != CALL_EXPR
7403 || !TREE_ADDRESSABLE (TREE_TYPE (exp
))
7404 || !CALL_EXPR_RETURN_SLOT_OPT (exp
)))
7405 /* If we are expanding a MEM_REF of a non-BLKmode non-addressable
7406 decl we must use bitfield operations. */
7407 || (known_size_p (bitsize
)
7408 && TREE_CODE (exp
) == MEM_REF
7409 && TREE_CODE (TREE_OPERAND (exp
, 0)) == ADDR_EXPR
7410 && DECL_P (TREE_OPERAND (TREE_OPERAND (exp
, 0), 0))
7411 && !TREE_ADDRESSABLE (TREE_OPERAND (TREE_OPERAND (exp
, 0), 0))
7412 && DECL_MODE (TREE_OPERAND (TREE_OPERAND (exp
, 0), 0)) != BLKmode
))
7417 /* If EXP is a NOP_EXPR of precision less than its mode, then that
7418 implies a mask operation. If the precision is the same size as
7419 the field we're storing into, that mask is redundant. This is
7420 particularly common with bit field assignments generated by the
7422 nop_def
= get_def_for_expr (exp
, NOP_EXPR
);
7425 tree type
= TREE_TYPE (exp
);
7426 if (INTEGRAL_TYPE_P (type
)
7427 && maybe_ne (TYPE_PRECISION (type
),
7428 GET_MODE_BITSIZE (TYPE_MODE (type
)))
7429 && known_eq (bitsize
, TYPE_PRECISION (type
)))
7431 tree op
= gimple_assign_rhs1 (nop_def
);
7432 type
= TREE_TYPE (op
);
7433 if (INTEGRAL_TYPE_P (type
)
7434 && known_ge (TYPE_PRECISION (type
), bitsize
))
7439 temp
= expand_normal (exp
);
7441 /* We don't support variable-sized BLKmode bitfields, since our
7442 handling of BLKmode is bound up with the ability to break
7443 things into words. */
7444 gcc_assert (mode
!= BLKmode
|| bitsize
.is_constant ());
7446 /* Handle calls that return values in multiple non-contiguous locations.
7447 The Irix 6 ABI has examples of this. */
7448 if (GET_CODE (temp
) == PARALLEL
)
7450 HOST_WIDE_INT size
= int_size_in_bytes (TREE_TYPE (exp
));
7451 machine_mode temp_mode
= GET_MODE (temp
);
7452 if (temp_mode
== BLKmode
|| temp_mode
== VOIDmode
)
7453 temp_mode
= smallest_int_mode_for_size (size
* BITS_PER_UNIT
);
7454 rtx temp_target
= gen_reg_rtx (temp_mode
);
7455 emit_group_store (temp_target
, temp
, TREE_TYPE (exp
), size
);
7459 /* Handle calls that return BLKmode values in registers. */
7460 else if (mode
== BLKmode
&& REG_P (temp
) && TREE_CODE (exp
) == CALL_EXPR
)
7462 rtx temp_target
= gen_reg_rtx (GET_MODE (temp
));
7463 copy_blkmode_from_reg (temp_target
, temp
, TREE_TYPE (exp
));
7467 /* If the value has aggregate type and an integral mode then, if BITSIZE
7468 is narrower than this mode and this is for big-endian data, we first
7469 need to put the value into the low-order bits for store_bit_field,
7470 except when MODE is BLKmode and BITSIZE larger than the word size
7471 (see the handling of fields larger than a word in store_bit_field).
7472 Moreover, the field may be not aligned on a byte boundary; in this
7473 case, if it has reverse storage order, it needs to be accessed as a
7474 scalar field with reverse storage order and we must first put the
7475 value into target order. */
7476 scalar_int_mode temp_mode
;
7477 if (AGGREGATE_TYPE_P (TREE_TYPE (exp
))
7478 && is_int_mode (GET_MODE (temp
), &temp_mode
))
7480 HOST_WIDE_INT size
= GET_MODE_BITSIZE (temp_mode
);
7482 reverse
= TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (exp
));
7485 temp
= flip_storage_order (temp_mode
, temp
);
7487 gcc_checking_assert (known_le (bitsize
, size
));
7488 if (maybe_lt (bitsize
, size
)
7489 && reverse
? !BYTES_BIG_ENDIAN
: BYTES_BIG_ENDIAN
7490 /* Use of to_constant for BLKmode was checked above. */
7491 && !(mode
== BLKmode
&& bitsize
.to_constant () > BITS_PER_WORD
))
7492 temp
= expand_shift (RSHIFT_EXPR
, temp_mode
, temp
,
7493 size
- bitsize
, NULL_RTX
, 1);
7496 /* Unless MODE is VOIDmode or BLKmode, convert TEMP to MODE. */
7497 if (mode
!= VOIDmode
&& mode
!= BLKmode
7498 && mode
!= TYPE_MODE (TREE_TYPE (exp
)))
7499 temp
= convert_modes (mode
, TYPE_MODE (TREE_TYPE (exp
)), temp
, 1);
7501 /* If the mode of TEMP and TARGET is BLKmode, both must be in memory
7502 and BITPOS must be aligned on a byte boundary. If so, we simply do
7503 a block copy. Likewise for a BLKmode-like TARGET. */
7504 if (GET_MODE (temp
) == BLKmode
7505 && (GET_MODE (target
) == BLKmode
7507 && GET_MODE_CLASS (GET_MODE (target
)) == MODE_INT
7508 && multiple_p (bitpos
, BITS_PER_UNIT
)
7509 && multiple_p (bitsize
, BITS_PER_UNIT
))))
7511 gcc_assert (MEM_P (target
) && MEM_P (temp
));
7512 poly_int64 bytepos
= exact_div (bitpos
, BITS_PER_UNIT
);
7513 poly_int64 bytesize
= bits_to_bytes_round_up (bitsize
);
7515 target
= adjust_address (target
, VOIDmode
, bytepos
);
7516 emit_block_move (target
, temp
,
7517 gen_int_mode (bytesize
, Pmode
),
7523 /* If the mode of TEMP is still BLKmode and BITSIZE not larger than the
7524 word size, we need to load the value (see again store_bit_field). */
7525 if (GET_MODE (temp
) == BLKmode
&& known_le (bitsize
, BITS_PER_WORD
))
7527 temp_mode
= smallest_int_mode_for_size (bitsize
);
7528 temp
= extract_bit_field (temp
, bitsize
, 0, 1, NULL_RTX
, temp_mode
,
7529 temp_mode
, false, NULL
);
7532 /* Store the value in the bitfield. */
7533 gcc_checking_assert (known_ge (bitpos
, 0));
7534 store_bit_field (target
, bitsize
, bitpos
,
7535 bitregion_start
, bitregion_end
,
7536 mode
, temp
, reverse
);
7542 /* Now build a reference to just the desired component. */
7543 rtx to_rtx
= adjust_address (target
, mode
,
7544 exact_div (bitpos
, BITS_PER_UNIT
));
7546 if (to_rtx
== target
)
7547 to_rtx
= copy_rtx (to_rtx
);
7549 if (!MEM_KEEP_ALIAS_SET_P (to_rtx
) && MEM_ALIAS_SET (to_rtx
) != 0)
7550 set_mem_alias_set (to_rtx
, alias_set
);
7552 /* Above we avoided using bitfield operations for storing a CONSTRUCTOR
7553 into a target smaller than its type; handle that case now. */
7554 if (TREE_CODE (exp
) == CONSTRUCTOR
&& known_size_p (bitsize
))
7556 poly_int64 bytesize
= exact_div (bitsize
, BITS_PER_UNIT
);
7557 store_constructor (exp
, to_rtx
, 0, bytesize
, reverse
);
7561 return store_expr (exp
, to_rtx
, 0, nontemporal
, reverse
);
7565 /* Given an expression EXP that may be a COMPONENT_REF, a BIT_FIELD_REF,
7566 an ARRAY_REF, or an ARRAY_RANGE_REF, look for nested operations of these
7567 codes and find the ultimate containing object, which we return.
7569 We set *PBITSIZE to the size in bits that we want, *PBITPOS to the
7570 bit position, *PUNSIGNEDP to the signedness and *PREVERSEP to the
7571 storage order of the field.
7572 If the position of the field is variable, we store a tree
7573 giving the variable offset (in units) in *POFFSET.
7574 This offset is in addition to the bit position.
7575 If the position is not variable, we store 0 in *POFFSET.
7577 If any of the extraction expressions is volatile,
7578 we store 1 in *PVOLATILEP. Otherwise we don't change that.
7580 If the field is a non-BLKmode bit-field, *PMODE is set to VOIDmode.
7581 Otherwise, it is a mode that can be used to access the field.
7583 If the field describes a variable-sized object, *PMODE is set to
7584 BLKmode and *PBITSIZE is set to -1. An access cannot be made in
7585 this case, but the address of the object can be found. */
7588 get_inner_reference (tree exp
, poly_int64_pod
*pbitsize
,
7589 poly_int64_pod
*pbitpos
, tree
*poffset
,
7590 machine_mode
*pmode
, int *punsignedp
,
7591 int *preversep
, int *pvolatilep
)
7594 machine_mode mode
= VOIDmode
;
7595 bool blkmode_bitfield
= false;
7596 tree offset
= size_zero_node
;
7597 poly_offset_int bit_offset
= 0;
7599 /* First get the mode, signedness, storage order and size. We do this from
7600 just the outermost expression. */
7602 if (TREE_CODE (exp
) == COMPONENT_REF
)
7604 tree field
= TREE_OPERAND (exp
, 1);
7605 size_tree
= DECL_SIZE (field
);
7606 if (flag_strict_volatile_bitfields
> 0
7607 && TREE_THIS_VOLATILE (exp
)
7608 && DECL_BIT_FIELD_TYPE (field
)
7609 && DECL_MODE (field
) != BLKmode
)
7610 /* Volatile bitfields should be accessed in the mode of the
7611 field's type, not the mode computed based on the bit
7613 mode
= TYPE_MODE (DECL_BIT_FIELD_TYPE (field
));
7614 else if (!DECL_BIT_FIELD (field
))
7616 mode
= DECL_MODE (field
);
7617 /* For vector fields re-check the target flags, as DECL_MODE
7618 could have been set with different target flags than
7619 the current function has. */
7621 && VECTOR_TYPE_P (TREE_TYPE (field
))
7622 && VECTOR_MODE_P (TYPE_MODE_RAW (TREE_TYPE (field
))))
7623 mode
= TYPE_MODE (TREE_TYPE (field
));
7625 else if (DECL_MODE (field
) == BLKmode
)
7626 blkmode_bitfield
= true;
7628 *punsignedp
= DECL_UNSIGNED (field
);
7630 else if (TREE_CODE (exp
) == BIT_FIELD_REF
)
7632 size_tree
= TREE_OPERAND (exp
, 1);
7633 *punsignedp
= (! INTEGRAL_TYPE_P (TREE_TYPE (exp
))
7634 || TYPE_UNSIGNED (TREE_TYPE (exp
)));
7636 /* For vector element types with the correct size of access or for
7637 vector typed accesses use the mode of the access type. */
7638 if ((TREE_CODE (TREE_TYPE (TREE_OPERAND (exp
, 0))) == VECTOR_TYPE
7639 && TREE_TYPE (exp
) == TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp
, 0)))
7640 && tree_int_cst_equal (size_tree
, TYPE_SIZE (TREE_TYPE (exp
))))
7641 || VECTOR_TYPE_P (TREE_TYPE (exp
)))
7642 mode
= TYPE_MODE (TREE_TYPE (exp
));
7646 mode
= TYPE_MODE (TREE_TYPE (exp
));
7647 *punsignedp
= TYPE_UNSIGNED (TREE_TYPE (exp
));
7649 if (mode
== BLKmode
)
7650 size_tree
= TYPE_SIZE (TREE_TYPE (exp
));
7652 *pbitsize
= GET_MODE_BITSIZE (mode
);
7657 if (! tree_fits_uhwi_p (size_tree
))
7658 mode
= BLKmode
, *pbitsize
= -1;
7660 *pbitsize
= tree_to_uhwi (size_tree
);
7663 *preversep
= reverse_storage_order_for_component_p (exp
);
7665 /* Compute cumulative bit-offset for nested component-refs and array-refs,
7666 and find the ultimate containing object. */
7669 switch (TREE_CODE (exp
))
7672 bit_offset
+= wi::to_poly_offset (TREE_OPERAND (exp
, 2));
7677 tree field
= TREE_OPERAND (exp
, 1);
7678 tree this_offset
= component_ref_field_offset (exp
);
7680 /* If this field hasn't been filled in yet, don't go past it.
7681 This should only happen when folding expressions made during
7682 type construction. */
7683 if (this_offset
== 0)
7686 offset
= size_binop (PLUS_EXPR
, offset
, this_offset
);
7687 bit_offset
+= wi::to_poly_offset (DECL_FIELD_BIT_OFFSET (field
));
7689 /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
7694 case ARRAY_RANGE_REF
:
7696 tree index
= TREE_OPERAND (exp
, 1);
7697 tree low_bound
= array_ref_low_bound (exp
);
7698 tree unit_size
= array_ref_element_size (exp
);
7700 /* We assume all arrays have sizes that are a multiple of a byte.
7701 First subtract the lower bound, if any, in the type of the
7702 index, then convert to sizetype and multiply by the size of
7703 the array element. */
7704 if (! integer_zerop (low_bound
))
7705 index
= fold_build2 (MINUS_EXPR
, TREE_TYPE (index
),
7708 offset
= size_binop (PLUS_EXPR
, offset
,
7709 size_binop (MULT_EXPR
,
7710 fold_convert (sizetype
, index
),
7719 bit_offset
+= *pbitsize
;
7722 case VIEW_CONVERT_EXPR
:
7726 /* Hand back the decl for MEM[&decl, off]. */
7727 if (TREE_CODE (TREE_OPERAND (exp
, 0)) == ADDR_EXPR
)
7729 tree off
= TREE_OPERAND (exp
, 1);
7730 if (!integer_zerop (off
))
7732 poly_offset_int boff
= mem_ref_offset (exp
);
7733 boff
<<= LOG2_BITS_PER_UNIT
;
7736 exp
= TREE_OPERAND (TREE_OPERAND (exp
, 0), 0);
7744 /* If any reference in the chain is volatile, the effect is volatile. */
7745 if (TREE_THIS_VOLATILE (exp
))
7748 exp
= TREE_OPERAND (exp
, 0);
7752 /* If OFFSET is constant, see if we can return the whole thing as a
7753 constant bit position. Make sure to handle overflow during
7755 if (poly_int_tree_p (offset
))
7757 poly_offset_int tem
= wi::sext (wi::to_poly_offset (offset
),
7758 TYPE_PRECISION (sizetype
));
7759 tem
<<= LOG2_BITS_PER_UNIT
;
7761 if (tem
.to_shwi (pbitpos
))
7762 *poffset
= offset
= NULL_TREE
;
7765 /* Otherwise, split it up. */
7768 /* Avoid returning a negative bitpos as this may wreak havoc later. */
7769 if (!bit_offset
.to_shwi (pbitpos
) || maybe_lt (*pbitpos
, 0))
7771 *pbitpos
= num_trailing_bits (bit_offset
.force_shwi ());
7772 poly_offset_int bytes
= bits_to_bytes_round_down (bit_offset
);
7773 offset
= size_binop (PLUS_EXPR
, offset
,
7774 build_int_cst (sizetype
, bytes
.force_shwi ()));
7780 /* We can use BLKmode for a byte-aligned BLKmode bitfield. */
7781 if (mode
== VOIDmode
7783 && multiple_p (*pbitpos
, BITS_PER_UNIT
)
7784 && multiple_p (*pbitsize
, BITS_PER_UNIT
))
7792 /* Alignment in bits the TARGET of an assignment may be assumed to have. */
/* NOTE(review): line-mangled extraction — the switch's case labels that
   precede original lines 7808, 7813 and 7819 (presumably COMPONENT_REF,
   ARRAY_REF and the conversion codes — TODO confirm against upstream
   gcc/expr.c), the surrounding braces, the default label before 7826 and
   the closing brace of the function were all dropped.  The visible code
   is kept byte-identical below.  */
7794 static unsigned HOST_WIDE_INT
7795 target_align (const_tree target
)
7797 /* We might have a chain of nested references with intermediate misaligning
7798 bitfields components, so need to recurse to find out. */
7800 unsigned HOST_WIDE_INT this_align
, outer_align
;
/* Dispatch on the kind of reference at the outermost level of TARGET.  */
7802 switch (TREE_CODE (target
))
))
/* Field reference: the field's own alignment, clamped by whatever the
   containing object (operand 0) can guarantee.  */
7808 this_align
= DECL_ALIGN (TREE_OPERAND (target
, 1));
7809 outer_align
= target_align (TREE_OPERAND (target
, 0));
7810 return MIN (this_align
, outer_align
);
/* Array element/range: alignment of the element type, again clamped by
   the containing object's alignment.  */
7813 case ARRAY_RANGE_REF
:
7814 this_align
= TYPE_ALIGN (TREE_TYPE (target
));
7815 outer_align
= target_align (TREE_OPERAND (target
, 0));
7816 return MIN (this_align
, outer_align
);
/* Value-preserving wrappers: these do not reduce alignment, so the
   stronger of the two guarantees holds (MAX, not MIN).  */
7819 case NON_LVALUE_EXPR
:
7820 case VIEW_CONVERT_EXPR
:
7821 this_align
= TYPE_ALIGN (TREE_TYPE (target
));
7822 outer_align
= target_align (TREE_OPERAND (target
, 0));
7823 return MAX (this_align
, outer_align
);
/* Base case (default label dropped by extraction): a plain object is
   assumed to have its type's alignment.  */
7826 return TYPE_ALIGN (TREE_TYPE (target
));
7831 /* Given an rtx VALUE that may contain additions and multiplications, return
7832 an equivalent value that just refers to a register, memory, or constant.
7833 This is done by generating instructions to perform the arithmetic and
7834 returning a pseudo-register containing the value.
7836 The returned value may be a REG, SUBREG, MEM or constant. */
7839 force_operand (rtx value
, rtx target
)
7842 /* Use subtarget as the target for operand 0 of a binary operation. */
7843 rtx subtarget
= get_subtarget (target
);
7844 enum rtx_code code
= GET_CODE (value
);
7846 /* Check for subreg applied to an expression produced by loop optimizer. */
7848 && !REG_P (SUBREG_REG (value
))
7849 && !MEM_P (SUBREG_REG (value
)))
7852 = simplify_gen_subreg (GET_MODE (value
),
7853 force_reg (GET_MODE (SUBREG_REG (value
)),
7854 force_operand (SUBREG_REG (value
),
7856 GET_MODE (SUBREG_REG (value
)),
7857 SUBREG_BYTE (value
));
7858 code
= GET_CODE (value
);
7861 /* Check for a PIC address load. */
7862 if ((code
== PLUS
|| code
== MINUS
)
7863 && XEXP (value
, 0) == pic_offset_table_rtx
7864 && (GET_CODE (XEXP (value
, 1)) == SYMBOL_REF
7865 || GET_CODE (XEXP (value
, 1)) == LABEL_REF
7866 || GET_CODE (XEXP (value
, 1)) == CONST
))
7869 subtarget
= gen_reg_rtx (GET_MODE (value
));
7870 emit_move_insn (subtarget
, value
);
7874 if (ARITHMETIC_P (value
))
7876 op2
= XEXP (value
, 1);
7877 if (!CONSTANT_P (op2
) && !(REG_P (op2
) && op2
!= subtarget
))
7879 if (code
== MINUS
&& CONST_INT_P (op2
))
7882 op2
= negate_rtx (GET_MODE (value
), op2
);
7885 /* Check for an addition with OP2 a constant integer and our first
7886 operand a PLUS of a virtual register and something else. In that
7887 case, we want to emit the sum of the virtual register and the
7888 constant first and then add the other value. This allows virtual
7889 register instantiation to simply modify the constant rather than
7890 creating another one around this addition. */
7891 if (code
== PLUS
&& CONST_INT_P (op2
)
7892 && GET_CODE (XEXP (value
, 0)) == PLUS
7893 && REG_P (XEXP (XEXP (value
, 0), 0))
7894 && REGNO (XEXP (XEXP (value
, 0), 0)) >= FIRST_VIRTUAL_REGISTER
7895 && REGNO (XEXP (XEXP (value
, 0), 0)) <= LAST_VIRTUAL_REGISTER
)
7897 rtx temp
= expand_simple_binop (GET_MODE (value
), code
,
7898 XEXP (XEXP (value
, 0), 0), op2
,
7899 subtarget
, 0, OPTAB_LIB_WIDEN
);
7900 return expand_simple_binop (GET_MODE (value
), code
, temp
,
7901 force_operand (XEXP (XEXP (value
,
7903 target
, 0, OPTAB_LIB_WIDEN
);
7906 op1
= force_operand (XEXP (value
, 0), subtarget
);
7907 op2
= force_operand (op2
, NULL_RTX
);
7911 return expand_mult (GET_MODE (value
), op1
, op2
, target
, 1);
7913 if (!INTEGRAL_MODE_P (GET_MODE (value
)))
7914 return expand_simple_binop (GET_MODE (value
), code
, op1
, op2
,
7915 target
, 1, OPTAB_LIB_WIDEN
);
7917 return expand_divmod (0,
7918 FLOAT_MODE_P (GET_MODE (value
))
7919 ? RDIV_EXPR
: TRUNC_DIV_EXPR
,
7920 GET_MODE (value
), op1
, op2
, target
, 0);
7922 return expand_divmod (1, TRUNC_MOD_EXPR
, GET_MODE (value
), op1
, op2
,
7925 return expand_divmod (0, TRUNC_DIV_EXPR
, GET_MODE (value
), op1
, op2
,
7928 return expand_divmod (1, TRUNC_MOD_EXPR
, GET_MODE (value
), op1
, op2
,
7931 return expand_simple_binop (GET_MODE (value
), code
, op1
, op2
,
7932 target
, 0, OPTAB_LIB_WIDEN
);
7934 return expand_simple_binop (GET_MODE (value
), code
, op1
, op2
,
7935 target
, 1, OPTAB_LIB_WIDEN
);
7938 if (UNARY_P (value
))
7941 target
= gen_reg_rtx (GET_MODE (value
));
7942 op1
= force_operand (XEXP (value
, 0), NULL_RTX
);
7949 case FLOAT_TRUNCATE
:
7950 convert_move (target
, op1
, code
== ZERO_EXTEND
);
7955 expand_fix (target
, op1
, code
== UNSIGNED_FIX
);
7959 case UNSIGNED_FLOAT
:
7960 expand_float (target
, op1
, code
== UNSIGNED_FLOAT
);
7964 return expand_simple_unop (GET_MODE (value
), code
, op1
, target
, 0);
7968 #ifdef INSN_SCHEDULING
7969 /* On machines that have insn scheduling, we want all memory reference to be
7970 explicit, so we need to deal with such paradoxical SUBREGs. */
7971 if (paradoxical_subreg_p (value
) && MEM_P (SUBREG_REG (value
)))
7973 = simplify_gen_subreg (GET_MODE (value
),
7974 force_reg (GET_MODE (SUBREG_REG (value
)),
7975 force_operand (SUBREG_REG (value
),
7977 GET_MODE (SUBREG_REG (value
)),
7978 SUBREG_BYTE (value
));
7984 /* Subroutine of expand_expr: return nonzero iff there is no way that
7985 EXP can reference X, which is being modified. TOP_P is nonzero if this
7986 call is going to be used to determine whether we need a temporary
7987 for EXP, as opposed to a recursive call to this function.
7989 It is always safe for this routine to return zero since it merely
7990 searches for optimization opportunities. */
7993 safe_from_p (const_rtx x
, tree exp
, int top_p
)
7999 /* If EXP has varying size, we MUST use a target since we currently
8000 have no way of allocating temporaries of variable size
8001 (except for arrays that have TYPE_ARRAY_MAX_SIZE set).
8002 So we assume here that something at a higher level has prevented a
8003 clash. This is somewhat bogus, but the best we can do. Only
8004 do this when X is BLKmode and when we are at the top level. */
8005 || (top_p
&& TREE_TYPE (exp
) != 0 && COMPLETE_TYPE_P (TREE_TYPE (exp
))
8006 && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp
))) != INTEGER_CST
8007 && (TREE_CODE (TREE_TYPE (exp
)) != ARRAY_TYPE
8008 || TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp
)) == NULL_TREE
8009 || TREE_CODE (TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp
)))
8011 && GET_MODE (x
) == BLKmode
)
8012 /* If X is in the outgoing argument area, it is always safe. */
8014 && (XEXP (x
, 0) == virtual_outgoing_args_rtx
8015 || (GET_CODE (XEXP (x
, 0)) == PLUS
8016 && XEXP (XEXP (x
, 0), 0) == virtual_outgoing_args_rtx
))))
8019 /* If this is a subreg of a hard register, declare it unsafe, otherwise,
8020 find the underlying pseudo. */
8021 if (GET_CODE (x
) == SUBREG
)
8024 if (REG_P (x
) && REGNO (x
) < FIRST_PSEUDO_REGISTER
)
8028 /* Now look at our tree code and possibly recurse. */
8029 switch (TREE_CODE_CLASS (TREE_CODE (exp
)))
8031 case tcc_declaration
:
8032 exp_rtl
= DECL_RTL_IF_SET (exp
);
8038 case tcc_exceptional
:
8039 if (TREE_CODE (exp
) == TREE_LIST
)
8043 if (TREE_VALUE (exp
) && !safe_from_p (x
, TREE_VALUE (exp
), 0))
8045 exp
= TREE_CHAIN (exp
);
8048 if (TREE_CODE (exp
) != TREE_LIST
)
8049 return safe_from_p (x
, exp
, 0);
8052 else if (TREE_CODE (exp
) == CONSTRUCTOR
)
8054 constructor_elt
*ce
;
8055 unsigned HOST_WIDE_INT idx
;
8057 FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (exp
), idx
, ce
)
8058 if ((ce
->index
!= NULL_TREE
&& !safe_from_p (x
, ce
->index
, 0))
8059 || !safe_from_p (x
, ce
->value
, 0))
8063 else if (TREE_CODE (exp
) == ERROR_MARK
)
8064 return 1; /* An already-visited SAVE_EXPR? */
8069 /* The only case we look at here is the DECL_INITIAL inside a
8071 return (TREE_CODE (exp
) != DECL_EXPR
8072 || TREE_CODE (DECL_EXPR_DECL (exp
)) != VAR_DECL
8073 || !DECL_INITIAL (DECL_EXPR_DECL (exp
))
8074 || safe_from_p (x
, DECL_INITIAL (DECL_EXPR_DECL (exp
)), 0));
8077 case tcc_comparison
:
8078 if (!safe_from_p (x
, TREE_OPERAND (exp
, 1), 0))
8083 return safe_from_p (x
, TREE_OPERAND (exp
, 0), 0);
8085 case tcc_expression
:
8088 /* Now do code-specific tests. EXP_RTL is set to any rtx we find in
8089 the expression. If it is set, we conflict iff we are that rtx or
8090 both are in memory. Otherwise, we check all operands of the
8091 expression recursively. */
8093 switch (TREE_CODE (exp
))
8096 /* If the operand is static or we are static, we can't conflict.
8097 Likewise if we don't conflict with the operand at all. */
8098 if (staticp (TREE_OPERAND (exp
, 0))
8099 || TREE_STATIC (exp
)
8100 || safe_from_p (x
, TREE_OPERAND (exp
, 0), 0))
8103 /* Otherwise, the only way this can conflict is if we are taking
8104 the address of a DECL a that address if part of X, which is
8106 exp
= TREE_OPERAND (exp
, 0);
8109 if (!DECL_RTL_SET_P (exp
)
8110 || !MEM_P (DECL_RTL (exp
)))
8113 exp_rtl
= XEXP (DECL_RTL (exp
), 0);
8119 && alias_sets_conflict_p (MEM_ALIAS_SET (x
),
8120 get_alias_set (exp
)))
8125 /* Assume that the call will clobber all hard registers and
8127 if ((REG_P (x
) && REGNO (x
) < FIRST_PSEUDO_REGISTER
)
8132 case WITH_CLEANUP_EXPR
:
8133 case CLEANUP_POINT_EXPR
:
8134 /* Lowered by gimplify.c. */
8138 return safe_from_p (x
, TREE_OPERAND (exp
, 0), 0);
8144 /* If we have an rtx, we do not need to scan our operands. */
8148 nops
= TREE_OPERAND_LENGTH (exp
);
8149 for (i
= 0; i
< nops
; i
++)
8150 if (TREE_OPERAND (exp
, i
) != 0
8151 && ! safe_from_p (x
, TREE_OPERAND (exp
, i
), 0))
8157 /* Should never get a type here. */
8161 /* If we have an rtl, find any enclosed object. Then see if we conflict
8165 if (GET_CODE (exp_rtl
) == SUBREG
)
8167 exp_rtl
= SUBREG_REG (exp_rtl
);
8169 && REGNO (exp_rtl
) < FIRST_PSEUDO_REGISTER
)
8173 /* If the rtl is X, then it is not safe. Otherwise, it is unless both
8174 are memory and they conflict. */
8175 return ! (rtx_equal_p (x
, exp_rtl
)
8176 || (MEM_P (x
) && MEM_P (exp_rtl
)
8177 && true_dependence (exp_rtl
, VOIDmode
, x
)));
8180 /* If we reach here, it is safe. */
8185 /* Return the highest power of two that EXP is known to be a multiple of.
8186 This is used in updating alignment of MEMs in array references. */
8188 unsigned HOST_WIDE_INT
8189 highest_pow2_factor (const_tree exp
)
8191 unsigned HOST_WIDE_INT ret
;
8192 int trailing_zeros
= tree_ctz (exp
);
8193 if (trailing_zeros
>= HOST_BITS_PER_WIDE_INT
)
8194 return BIGGEST_ALIGNMENT
;
8195 ret
= HOST_WIDE_INT_1U
<< trailing_zeros
;
8196 if (ret
> BIGGEST_ALIGNMENT
)
8197 return BIGGEST_ALIGNMENT
;
8201 /* Similar, except that the alignment requirements of TARGET are
8202 taken into account. Assume it is at least as aligned as its
8203 type, unless it is a COMPONENT_REF in which case the layout of
8204 the structure gives the alignment. */
8206 static unsigned HOST_WIDE_INT
8207 highest_pow2_factor_for_target (const_tree target
, const_tree exp
)
8209 unsigned HOST_WIDE_INT talign
= target_align (target
) / BITS_PER_UNIT
;
8210 unsigned HOST_WIDE_INT factor
= highest_pow2_factor (exp
);
8212 return MAX (factor
, talign
);
8215 /* Convert the tree comparison code TCODE to the rtl one where the
8216 signedness is UNSIGNEDP. */
8218 static enum rtx_code
8219 convert_tree_comp_to_rtx (enum tree_code tcode
, int unsignedp
)
8231 code
= unsignedp
? LTU
: LT
;
8234 code
= unsignedp
? LEU
: LE
;
8237 code
= unsignedp
? GTU
: GT
;
8240 code
= unsignedp
? GEU
: GE
;
8242 case UNORDERED_EXPR
:
8273 /* Subroutine of expand_expr. Expand the two operands of a binary
8274 expression EXP0 and EXP1 placing the results in OP0 and OP1.
8275 The value may be stored in TARGET if TARGET is nonzero. The
8276 MODIFIER argument is as documented by expand_expr. */
8279 expand_operands (tree exp0
, tree exp1
, rtx target
, rtx
*op0
, rtx
*op1
,
8280 enum expand_modifier modifier
)
8282 if (! safe_from_p (target
, exp1
, 1))
8284 if (operand_equal_p (exp0
, exp1
, 0))
8286 *op0
= expand_expr (exp0
, target
, VOIDmode
, modifier
);
8287 *op1
= copy_rtx (*op0
);
8291 *op0
= expand_expr (exp0
, target
, VOIDmode
, modifier
);
8292 *op1
= expand_expr (exp1
, NULL_RTX
, VOIDmode
, modifier
);
8297 /* Return a MEM that contains constant EXP. DEFER is as for
8298 output_constant_def and MODIFIER is as for expand_expr. */
8301 expand_expr_constant (tree exp
, int defer
, enum expand_modifier modifier
)
8305 mem
= output_constant_def (exp
, defer
);
8306 if (modifier
!= EXPAND_INITIALIZER
)
8307 mem
= use_anchored_address (mem
);
8311 /* A subroutine of expand_expr_addr_expr. Evaluate the address of EXP.
8312 The TARGET, TMODE and MODIFIER arguments are as for expand_expr. */
8315 expand_expr_addr_expr_1 (tree exp
, rtx target
, scalar_int_mode tmode
,
8316 enum expand_modifier modifier
, addr_space_t as
)
8318 rtx result
, subtarget
;
8320 poly_int64 bitsize
, bitpos
;
8321 int unsignedp
, reversep
, volatilep
= 0;
8324 /* If we are taking the address of a constant and are at the top level,
8325 we have to use output_constant_def since we can't call force_const_mem
8327 /* ??? This should be considered a front-end bug. We should not be
8328 generating ADDR_EXPR of something that isn't an LVALUE. The only
8329 exception here is STRING_CST. */
8330 if (CONSTANT_CLASS_P (exp
))
8332 result
= XEXP (expand_expr_constant (exp
, 0, modifier
), 0);
8333 if (modifier
< EXPAND_SUM
)
8334 result
= force_operand (result
, target
);
8338 /* Everything must be something allowed by is_gimple_addressable. */
8339 switch (TREE_CODE (exp
))
8342 /* This case will happen via recursion for &a->b. */
8343 return expand_expr (TREE_OPERAND (exp
, 0), target
, tmode
, modifier
);
8347 tree tem
= TREE_OPERAND (exp
, 0);
8348 if (!integer_zerop (TREE_OPERAND (exp
, 1)))
8349 tem
= fold_build_pointer_plus (tem
, TREE_OPERAND (exp
, 1));
8350 return expand_expr (tem
, target
, tmode
, modifier
);
8353 case TARGET_MEM_REF
:
8354 return addr_for_mem_ref (exp
, as
, true);
8357 /* Expand the initializer like constants above. */
8358 result
= XEXP (expand_expr_constant (DECL_INITIAL (exp
),
8360 if (modifier
< EXPAND_SUM
)
8361 result
= force_operand (result
, target
);
8365 /* The real part of the complex number is always first, therefore
8366 the address is the same as the address of the parent object. */
8369 inner
= TREE_OPERAND (exp
, 0);
8373 /* The imaginary part of the complex number is always second.
8374 The expression is therefore always offset by the size of the
8377 bitpos
= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (exp
)));
8378 inner
= TREE_OPERAND (exp
, 0);
8381 case COMPOUND_LITERAL_EXPR
:
8382 /* Allow COMPOUND_LITERAL_EXPR in initializers or coming from
8383 initializers, if e.g. rtl_for_decl_init is called on DECL_INITIAL
8384 with COMPOUND_LITERAL_EXPRs in it, or ARRAY_REF on a const static
8385 array with address of COMPOUND_LITERAL_EXPR in DECL_INITIAL;
8386 the initializers aren't gimplified. */
8387 if (COMPOUND_LITERAL_EXPR_DECL (exp
)
8388 && is_global_var (COMPOUND_LITERAL_EXPR_DECL (exp
)))
8389 return expand_expr_addr_expr_1 (COMPOUND_LITERAL_EXPR_DECL (exp
),
8390 target
, tmode
, modifier
, as
);
8393 /* If the object is a DECL, then expand it for its rtl. Don't bypass
8394 expand_expr, as that can have various side effects; LABEL_DECLs for
8395 example, may not have their DECL_RTL set yet. Expand the rtl of
8396 CONSTRUCTORs too, which should yield a memory reference for the
8397 constructor's contents. Assume language specific tree nodes can
8398 be expanded in some interesting way. */
8399 gcc_assert (TREE_CODE (exp
) < LAST_AND_UNUSED_TREE_CODE
);
8401 || TREE_CODE (exp
) == CONSTRUCTOR
8402 || TREE_CODE (exp
) == COMPOUND_LITERAL_EXPR
)
8404 result
= expand_expr (exp
, target
, tmode
,
8405 modifier
== EXPAND_INITIALIZER
8406 ? EXPAND_INITIALIZER
: EXPAND_CONST_ADDRESS
);
8408 /* If the DECL isn't in memory, then the DECL wasn't properly
8409 marked TREE_ADDRESSABLE, which will be either a front-end
8410 or a tree optimizer bug. */
8412 gcc_assert (MEM_P (result
));
8413 result
= XEXP (result
, 0);
8415 /* ??? Is this needed anymore? */
8417 TREE_USED (exp
) = 1;
8419 if (modifier
!= EXPAND_INITIALIZER
8420 && modifier
!= EXPAND_CONST_ADDRESS
8421 && modifier
!= EXPAND_SUM
)
8422 result
= force_operand (result
, target
);
8426 /* Pass FALSE as the last argument to get_inner_reference although
8427 we are expanding to RTL. The rationale is that we know how to
8428 handle "aligning nodes" here: we can just bypass them because
8429 they won't change the final object whose address will be returned
8430 (they actually exist only for that purpose). */
8431 inner
= get_inner_reference (exp
, &bitsize
, &bitpos
, &offset
, &mode1
,
8432 &unsignedp
, &reversep
, &volatilep
);
8436 /* We must have made progress. */
8437 gcc_assert (inner
!= exp
);
8439 subtarget
= offset
|| maybe_ne (bitpos
, 0) ? NULL_RTX
: target
;
8440 /* For VIEW_CONVERT_EXPR, where the outer alignment is bigger than
8441 inner alignment, force the inner to be sufficiently aligned. */
8442 if (CONSTANT_CLASS_P (inner
)
8443 && TYPE_ALIGN (TREE_TYPE (inner
)) < TYPE_ALIGN (TREE_TYPE (exp
)))
8445 inner
= copy_node (inner
);
8446 TREE_TYPE (inner
) = copy_node (TREE_TYPE (inner
));
8447 SET_TYPE_ALIGN (TREE_TYPE (inner
), TYPE_ALIGN (TREE_TYPE (exp
)));
8448 TYPE_USER_ALIGN (TREE_TYPE (inner
)) = 1;
8450 result
= expand_expr_addr_expr_1 (inner
, subtarget
, tmode
, modifier
, as
);
8456 if (modifier
!= EXPAND_NORMAL
)
8457 result
= force_operand (result
, NULL
);
8458 tmp
= expand_expr (offset
, NULL_RTX
, tmode
,
8459 modifier
== EXPAND_INITIALIZER
8460 ? EXPAND_INITIALIZER
: EXPAND_NORMAL
);
8462 /* expand_expr is allowed to return an object in a mode other
8463 than TMODE. If it did, we need to convert. */
8464 if (GET_MODE (tmp
) != VOIDmode
&& tmode
!= GET_MODE (tmp
))
8465 tmp
= convert_modes (tmode
, GET_MODE (tmp
),
8466 tmp
, TYPE_UNSIGNED (TREE_TYPE (offset
)));
8467 result
= convert_memory_address_addr_space (tmode
, result
, as
);
8468 tmp
= convert_memory_address_addr_space (tmode
, tmp
, as
);
8470 if (modifier
== EXPAND_SUM
|| modifier
== EXPAND_INITIALIZER
)
8471 result
= simplify_gen_binary (PLUS
, tmode
, result
, tmp
);
8474 subtarget
= maybe_ne (bitpos
, 0) ? NULL_RTX
: target
;
8475 result
= expand_simple_binop (tmode
, PLUS
, result
, tmp
, subtarget
,
8476 1, OPTAB_LIB_WIDEN
);
8480 if (maybe_ne (bitpos
, 0))
8482 /* Someone beforehand should have rejected taking the address
8483 of an object that isn't byte-aligned. */
8484 poly_int64 bytepos
= exact_div (bitpos
, BITS_PER_UNIT
);
8485 result
= convert_memory_address_addr_space (tmode
, result
, as
);
8486 result
= plus_constant (tmode
, result
, bytepos
);
8487 if (modifier
< EXPAND_SUM
)
8488 result
= force_operand (result
, target
);
8494 /* A subroutine of expand_expr. Evaluate EXP, which is an ADDR_EXPR.
8495 The TARGET, TMODE and MODIFIER arguments are as for expand_expr. */
8498 expand_expr_addr_expr (tree exp
, rtx target
, machine_mode tmode
,
8499 enum expand_modifier modifier
)
8501 addr_space_t as
= ADDR_SPACE_GENERIC
;
8502 scalar_int_mode address_mode
= Pmode
;
8503 scalar_int_mode pointer_mode
= ptr_mode
;
8507 /* Target mode of VOIDmode says "whatever's natural". */
8508 if (tmode
== VOIDmode
)
8509 tmode
= TYPE_MODE (TREE_TYPE (exp
));
8511 if (POINTER_TYPE_P (TREE_TYPE (exp
)))
8513 as
= TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp
)));
8514 address_mode
= targetm
.addr_space
.address_mode (as
);
8515 pointer_mode
= targetm
.addr_space
.pointer_mode (as
);
8518 /* We can get called with some Weird Things if the user does silliness
8519 like "(short) &a". In that case, convert_memory_address won't do
8520 the right thing, so ignore the given target mode. */
8521 scalar_int_mode new_tmode
= (tmode
== pointer_mode
8525 result
= expand_expr_addr_expr_1 (TREE_OPERAND (exp
, 0), target
,
8526 new_tmode
, modifier
, as
);
8528 /* Despite expand_expr claims concerning ignoring TMODE when not
8529 strictly convenient, stuff breaks if we don't honor it. Note
8530 that combined with the above, we only do this for pointer modes. */
8531 rmode
= GET_MODE (result
);
8532 if (rmode
== VOIDmode
)
8534 if (rmode
!= new_tmode
)
8535 result
= convert_memory_address_addr_space (new_tmode
, result
, as
);
8540 /* Generate code for computing CONSTRUCTOR EXP.
8541 An rtx for the computed value is returned. If AVOID_TEMP_MEM
8542 is TRUE, instead of creating a temporary variable in memory
8543 NULL is returned and the caller needs to handle it differently. */
8546 expand_constructor (tree exp
, rtx target
, enum expand_modifier modifier
,
8547 bool avoid_temp_mem
)
8549 tree type
= TREE_TYPE (exp
);
8550 machine_mode mode
= TYPE_MODE (type
);
8552 /* Try to avoid creating a temporary at all. This is possible
8553 if all of the initializer is zero.
8554 FIXME: try to handle all [0..255] initializers we can handle
8556 if (TREE_STATIC (exp
)
8557 && !TREE_ADDRESSABLE (exp
)
8558 && target
!= 0 && mode
== BLKmode
8559 && all_zeros_p (exp
))
8561 clear_storage (target
, expr_size (exp
), BLOCK_OP_NORMAL
);
8565 /* All elts simple constants => refer to a constant in memory. But
8566 if this is a non-BLKmode mode, let it store a field at a time
8567 since that should make a CONST_INT, CONST_WIDE_INT or
8568 CONST_DOUBLE when we fold. Likewise, if we have a target we can
8569 use, it is best to store directly into the target unless the type
8570 is large enough that memcpy will be used. If we are making an
8571 initializer and all operands are constant, put it in memory as
8574 FIXME: Avoid trying to fill vector constructors piece-meal.
8575 Output them with output_constant_def below unless we're sure
8576 they're zeros. This should go away when vector initializers
8577 are treated like VECTOR_CST instead of arrays. */
8578 if ((TREE_STATIC (exp
)
8579 && ((mode
== BLKmode
8580 && ! (target
!= 0 && safe_from_p (target
, exp
, 1)))
8581 || TREE_ADDRESSABLE (exp
)
8582 || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (type
))
8583 && (! can_move_by_pieces
8584 (tree_to_uhwi (TYPE_SIZE_UNIT (type
)),
8586 && ! mostly_zeros_p (exp
))))
8587 || ((modifier
== EXPAND_INITIALIZER
|| modifier
== EXPAND_CONST_ADDRESS
)
8588 && TREE_CONSTANT (exp
)))
8595 constructor
= expand_expr_constant (exp
, 1, modifier
);
8597 if (modifier
!= EXPAND_CONST_ADDRESS
8598 && modifier
!= EXPAND_INITIALIZER
8599 && modifier
!= EXPAND_SUM
)
8600 constructor
= validize_mem (constructor
);
8605 /* If the CTOR is available in static storage and not mostly
8606 zeros and we can move it by pieces prefer to do so since
8607 that's usually more efficient than performing a series of
8608 stores from immediates. */
8610 && TREE_STATIC (exp
)
8611 && TREE_CONSTANT (exp
)
8612 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type
))
8613 && can_move_by_pieces (tree_to_uhwi (TYPE_SIZE_UNIT (type
)),
8615 && ! mostly_zeros_p (exp
))
8618 /* Handle calls that pass values in multiple non-contiguous
8619 locations. The Irix 6 ABI has examples of this. */
8620 if (target
== 0 || ! safe_from_p (target
, exp
, 1)
8621 || GET_CODE (target
) == PARALLEL
|| modifier
== EXPAND_STACK_PARM
8622 /* Also make a temporary if the store is to volatile memory, to
8623 avoid individual accesses to aggregate members. */
8624 || (GET_CODE (target
) == MEM
8625 && MEM_VOLATILE_P (target
)
8626 && !TREE_ADDRESSABLE (TREE_TYPE (exp
))))
8631 target
= assign_temp (type
, TREE_ADDRESSABLE (exp
), 1);
8634 store_constructor (exp
, target
, 0, int_expr_size (exp
), false);
8639 /* expand_expr: generate code for computing expression EXP.
8640 An rtx for the computed value is returned. The value is never null.
8641 In the case of a void EXP, const0_rtx is returned.
8643 The value may be stored in TARGET if TARGET is nonzero.
8644 TARGET is just a suggestion; callers must assume that
8645 the rtx returned may not be the same as TARGET.
8647 If TARGET is CONST0_RTX, it means that the value will be ignored.
8649 If TMODE is not VOIDmode, it suggests generating the
8650 result in mode TMODE. But this is done only when convenient.
8651 Otherwise, TMODE is ignored and the value generated in its natural mode.
8652 TMODE is just a suggestion; callers must assume that
8653 the rtx returned may not have mode TMODE.
8655 Note that TARGET may have neither TMODE nor MODE. In that case, it
8656 probably will not be used.
8658 If MODIFIER is EXPAND_SUM then when EXP is an addition
8659 we can return an rtx of the form (MULT (REG ...) (CONST_INT ...))
8660 or a nest of (PLUS ...) and (MINUS ...) where the terms are
8661 products as above, or REG or MEM, or constant.
8662 Ordinarily in such cases we would output mul or add instructions
8663 and then return a pseudo reg containing the sum.
8665 EXPAND_INITIALIZER is much like EXPAND_SUM except that
8666 it also marks a label as absolutely required (it can't be dead).
8667 It also makes a ZERO_EXTEND or SIGN_EXTEND instead of emitting extend insns.
8668 This is used for outputting expressions used in initializers.
8670 EXPAND_CONST_ADDRESS says that it is okay to return a MEM
8671 with a constant address even if that address is not normally legitimate.
8672 EXPAND_INITIALIZER and EXPAND_SUM also have this effect.
8674 EXPAND_STACK_PARM is used when expanding to a TARGET on the stack for
8675 a call parameter. Such targets require special care as we haven't yet
8676 marked TARGET so that it's safe from being trashed by libcalls. We
8677 don't want to use TARGET for anything but the final result;
8678 Intermediate values must go elsewhere. Additionally, calls to
8679 emit_block_move will be flagged with BLOCK_OP_CALL_PARM.
8681 If EXP is a VAR_DECL whose DECL_RTL was a MEM with an invalid
8682 address, and ALT_RTL is non-NULL, then *ALT_RTL is set to the
8683 DECL_RTL of the VAR_DECL. *ALT_RTL is also set if EXP is a
8684 COMPOUND_EXPR whose second argument is such a VAR_DECL, and so on
8686 If the result can be stored at TARGET, and ALT_RTL is non-NULL,
8687 then *ALT_RTL is set to TARGET (before legitimziation).
8689 If INNER_REFERENCE_P is true, we are expanding an inner reference.
8690 In this case, we don't adjust a returned MEM rtx that wouldn't be
8691 sufficiently aligned for its mode; instead, it's up to the caller
8692 to deal with it afterwards. This is used to make sure that unaligned
8693 base objects for which out-of-bounds accesses are supported, for
8694 example record types with trailing arrays, aren't realigned behind
8695 the back of the caller.
8696 The normal operating mode is to pass FALSE for this parameter. */
8699 expand_expr_real (tree exp
, rtx target
, machine_mode tmode
,
8700 enum expand_modifier modifier
, rtx
*alt_rtl
,
8701 bool inner_reference_p
)
8705 /* Handle ERROR_MARK before anybody tries to access its type. */
8706 if (TREE_CODE (exp
) == ERROR_MARK
8707 || (TREE_CODE (TREE_TYPE (exp
)) == ERROR_MARK
))
8709 ret
= CONST0_RTX (tmode
);
8710 return ret
? ret
: const0_rtx
;
8713 ret
= expand_expr_real_1 (exp
, target
, tmode
, modifier
, alt_rtl
,
8718 /* Try to expand the conditional expression which is represented by
8719 TREEOP0 ? TREEOP1 : TREEOP2 using conditonal moves. If it succeeds
8720 return the rtl reg which represents the result. Otherwise return
8724 expand_cond_expr_using_cmove (tree treeop0 ATTRIBUTE_UNUSED
,
8725 tree treeop1 ATTRIBUTE_UNUSED
,
8726 tree treeop2 ATTRIBUTE_UNUSED
)
8729 rtx op00
, op01
, op1
, op2
;
8730 enum rtx_code comparison_code
;
8731 machine_mode comparison_mode
;
8734 tree type
= TREE_TYPE (treeop1
);
8735 int unsignedp
= TYPE_UNSIGNED (type
);
8736 machine_mode mode
= TYPE_MODE (type
);
8737 machine_mode orig_mode
= mode
;
8738 static bool expanding_cond_expr_using_cmove
= false;
8740 /* Conditional move expansion can end up TERing two operands which,
8741 when recursively hitting conditional expressions can result in
8742 exponential behavior if the cmove expansion ultimatively fails.
8743 It's hardly profitable to TER a cmove into a cmove so avoid doing
8744 that by failing early if we end up recursing. */
8745 if (expanding_cond_expr_using_cmove
)
8748 /* If we cannot do a conditional move on the mode, try doing it
8749 with the promoted mode. */
8750 if (!can_conditionally_move_p (mode
))
8752 mode
= promote_mode (type
, mode
, &unsignedp
);
8753 if (!can_conditionally_move_p (mode
))
8755 temp
= assign_temp (type
, 0, 0); /* Use promoted mode for temp. */
8758 temp
= assign_temp (type
, 0, 1);
8760 expanding_cond_expr_using_cmove
= true;
8762 expand_operands (treeop1
, treeop2
,
8763 temp
, &op1
, &op2
, EXPAND_NORMAL
);
8765 if (TREE_CODE (treeop0
) == SSA_NAME
8766 && (srcstmt
= get_def_for_expr_class (treeop0
, tcc_comparison
)))
8768 type
= TREE_TYPE (gimple_assign_rhs1 (srcstmt
));
8769 enum tree_code cmpcode
= gimple_assign_rhs_code (srcstmt
);
8770 op00
= expand_normal (gimple_assign_rhs1 (srcstmt
));
8771 op01
= expand_normal (gimple_assign_rhs2 (srcstmt
));
8772 comparison_mode
= TYPE_MODE (type
);
8773 unsignedp
= TYPE_UNSIGNED (type
);
8774 comparison_code
= convert_tree_comp_to_rtx (cmpcode
, unsignedp
);
8776 else if (COMPARISON_CLASS_P (treeop0
))
8778 type
= TREE_TYPE (TREE_OPERAND (treeop0
, 0));
8779 enum tree_code cmpcode
= TREE_CODE (treeop0
);
8780 op00
= expand_normal (TREE_OPERAND (treeop0
, 0));
8781 op01
= expand_normal (TREE_OPERAND (treeop0
, 1));
8782 unsignedp
= TYPE_UNSIGNED (type
);
8783 comparison_mode
= TYPE_MODE (type
);
8784 comparison_code
= convert_tree_comp_to_rtx (cmpcode
, unsignedp
);
8788 op00
= expand_normal (treeop0
);
8790 comparison_code
= NE
;
8791 comparison_mode
= GET_MODE (op00
);
8792 if (comparison_mode
== VOIDmode
)
8793 comparison_mode
= TYPE_MODE (TREE_TYPE (treeop0
));
8795 expanding_cond_expr_using_cmove
= false;
8797 if (GET_MODE (op1
) != mode
)
8798 op1
= gen_lowpart (mode
, op1
);
8800 if (GET_MODE (op2
) != mode
)
8801 op2
= gen_lowpart (mode
, op2
);
8803 /* Try to emit the conditional move. */
8804 insn
= emit_conditional_move (temp
, comparison_code
,
8805 op00
, op01
, comparison_mode
,
8809 /* If we could do the conditional move, emit the sequence,
8813 rtx_insn
*seq
= get_insns ();
8816 return convert_modes (orig_mode
, mode
, temp
, 0);
8819 /* Otherwise discard the sequence and fall back to code with
8825 /* A helper function for expand_expr_real_2 to be used with a
8826 misaligned mem_ref TEMP. Assume an unsigned type if UNSIGNEDP
8827 is nonzero, with alignment ALIGN in bits.
8828 Store the value at TARGET if possible (if TARGET is nonzero).
8829 Regardless of TARGET, we return the rtx for where the value is placed.
8830 If the result can be stored at TARGET, and ALT_RTL is non-NULL,
8831 then *ALT_RTL is set to TARGET (before legitimziation). */
8834 expand_misaligned_mem_ref (rtx temp
, machine_mode mode
, int unsignedp
,
8835 unsigned int align
, rtx target
, rtx
*alt_rtl
)
8837 enum insn_code icode
;
8839 if ((icode
= optab_handler (movmisalign_optab
, mode
))
8840 != CODE_FOR_nothing
)
8842 class expand_operand ops
[2];
8844 /* We've already validated the memory, and we're creating a
8845 new pseudo destination. The predicates really can't fail,
8846 nor can the generator. */
8847 create_output_operand (&ops
[0], NULL_RTX
, mode
);
8848 create_fixed_operand (&ops
[1], temp
);
8849 expand_insn (icode
, 2, ops
);
8850 temp
= ops
[0].value
;
8852 else if (targetm
.slow_unaligned_access (mode
, align
))
8853 temp
= extract_bit_field (temp
, GET_MODE_BITSIZE (mode
),
8854 0, unsignedp
, target
,
8855 mode
, mode
, false, alt_rtl
);
8859 /* Helper function of expand_expr_2, expand a division or modulo.
8860 op0 and op1 should be already expanded treeop0 and treeop1, using
8864 expand_expr_divmod (tree_code code
, machine_mode mode
, tree treeop0
,
8865 tree treeop1
, rtx op0
, rtx op1
, rtx target
, int unsignedp
)
8867 bool mod_p
= (code
== TRUNC_MOD_EXPR
|| code
== FLOOR_MOD_EXPR
8868 || code
== CEIL_MOD_EXPR
|| code
== ROUND_MOD_EXPR
);
8869 if (SCALAR_INT_MODE_P (mode
)
8871 && get_range_pos_neg (treeop0
) == 1
8872 && get_range_pos_neg (treeop1
) == 1)
8874 /* If both arguments are known to be positive when interpreted
8875 as signed, we can expand it as both signed and unsigned
8876 division or modulo. Choose the cheaper sequence in that case. */
8877 bool speed_p
= optimize_insn_for_speed_p ();
8878 do_pending_stack_adjust ();
8880 rtx uns_ret
= expand_divmod (mod_p
, code
, mode
, op0
, op1
, target
, 1);
8881 rtx_insn
*uns_insns
= get_insns ();
8884 rtx sgn_ret
= expand_divmod (mod_p
, code
, mode
, op0
, op1
, target
, 0);
8885 rtx_insn
*sgn_insns
= get_insns ();
8887 unsigned uns_cost
= seq_cost (uns_insns
, speed_p
);
8888 unsigned sgn_cost
= seq_cost (sgn_insns
, speed_p
);
8890 /* If costs are the same then use as tie breaker the other other
8892 if (uns_cost
== sgn_cost
)
8894 uns_cost
= seq_cost (uns_insns
, !speed_p
);
8895 sgn_cost
= seq_cost (sgn_insns
, !speed_p
);
8898 if (uns_cost
< sgn_cost
|| (uns_cost
== sgn_cost
&& unsignedp
))
8900 emit_insn (uns_insns
);
8903 emit_insn (sgn_insns
);
8906 return expand_divmod (mod_p
, code
, mode
, op0
, op1
, target
, unsignedp
);
8910 expand_expr_real_2 (sepops ops
, rtx target
, machine_mode tmode
,
8911 enum expand_modifier modifier
)
8913 rtx op0
, op1
, op2
, temp
;
8914 rtx_code_label
*lab
;
8918 scalar_int_mode int_mode
;
8919 enum tree_code code
= ops
->code
;
8921 rtx subtarget
, original_target
;
8923 bool reduce_bit_field
;
8924 location_t loc
= ops
->location
;
8925 tree treeop0
, treeop1
, treeop2
;
8926 #define REDUCE_BIT_FIELD(expr) (reduce_bit_field \
8927 ? reduce_to_bit_field_precision ((expr), \
8933 mode
= TYPE_MODE (type
);
8934 unsignedp
= TYPE_UNSIGNED (type
);
8940 /* We should be called only on simple (binary or unary) expressions,
8941 exactly those that are valid in gimple expressions that aren't
8942 GIMPLE_SINGLE_RHS (or invalid). */
8943 gcc_assert (get_gimple_rhs_class (code
) == GIMPLE_UNARY_RHS
8944 || get_gimple_rhs_class (code
) == GIMPLE_BINARY_RHS
8945 || get_gimple_rhs_class (code
) == GIMPLE_TERNARY_RHS
);
8947 ignore
= (target
== const0_rtx
8948 || ((CONVERT_EXPR_CODE_P (code
)
8949 || code
== COND_EXPR
|| code
== VIEW_CONVERT_EXPR
)
8950 && TREE_CODE (type
) == VOID_TYPE
));
8952 /* We should be called only if we need the result. */
8953 gcc_assert (!ignore
);
8955 /* An operation in what may be a bit-field type needs the
8956 result to be reduced to the precision of the bit-field type,
8957 which is narrower than that of the type's mode. */
8958 reduce_bit_field
= (INTEGRAL_TYPE_P (type
)
8959 && !type_has_mode_precision_p (type
));
8961 if (reduce_bit_field
8962 && (modifier
== EXPAND_STACK_PARM
8963 || (target
&& GET_MODE (target
) != mode
)))
8966 /* Use subtarget as the target for operand 0 of a binary operation. */
8967 subtarget
= get_subtarget (target
);
8968 original_target
= target
;
8972 case NON_LVALUE_EXPR
:
8975 if (treeop0
== error_mark_node
)
8978 if (TREE_CODE (type
) == UNION_TYPE
)
8980 tree valtype
= TREE_TYPE (treeop0
);
8982 /* If both input and output are BLKmode, this conversion isn't doing
8983 anything except possibly changing memory attribute. */
8984 if (mode
== BLKmode
&& TYPE_MODE (valtype
) == BLKmode
)
8986 rtx result
= expand_expr (treeop0
, target
, tmode
,
8989 result
= copy_rtx (result
);
8990 set_mem_attributes (result
, type
, 0);
8996 if (TYPE_MODE (type
) != BLKmode
)
8997 target
= gen_reg_rtx (TYPE_MODE (type
));
8999 target
= assign_temp (type
, 1, 1);
9003 /* Store data into beginning of memory target. */
9004 store_expr (treeop0
,
9005 adjust_address (target
, TYPE_MODE (valtype
), 0),
9006 modifier
== EXPAND_STACK_PARM
,
9007 false, TYPE_REVERSE_STORAGE_ORDER (type
));
9011 gcc_assert (REG_P (target
)
9012 && !TYPE_REVERSE_STORAGE_ORDER (type
));
9014 /* Store this field into a union of the proper type. */
9015 poly_uint64 op0_size
9016 = tree_to_poly_uint64 (TYPE_SIZE (TREE_TYPE (treeop0
)));
9017 poly_uint64 union_size
= GET_MODE_BITSIZE (mode
);
9018 store_field (target
,
9019 /* The conversion must be constructed so that
9020 we know at compile time how many bits
9022 ordered_min (op0_size
, union_size
),
9023 0, 0, 0, TYPE_MODE (valtype
), treeop0
, 0,
9027 /* Return the entire union. */
9031 if (mode
== TYPE_MODE (TREE_TYPE (treeop0
)))
9033 op0
= expand_expr (treeop0
, target
, VOIDmode
,
9036 /* If the signedness of the conversion differs and OP0 is
9037 a promoted SUBREG, clear that indication since we now
9038 have to do the proper extension. */
9039 if (TYPE_UNSIGNED (TREE_TYPE (treeop0
)) != unsignedp
9040 && GET_CODE (op0
) == SUBREG
)
9041 SUBREG_PROMOTED_VAR_P (op0
) = 0;
9043 return REDUCE_BIT_FIELD (op0
);
9046 op0
= expand_expr (treeop0
, NULL_RTX
, mode
,
9047 modifier
== EXPAND_SUM
? EXPAND_NORMAL
: modifier
);
9048 if (GET_MODE (op0
) == mode
)
9051 /* If OP0 is a constant, just convert it into the proper mode. */
9052 else if (CONSTANT_P (op0
))
9054 tree inner_type
= TREE_TYPE (treeop0
);
9055 machine_mode inner_mode
= GET_MODE (op0
);
9057 if (inner_mode
== VOIDmode
)
9058 inner_mode
= TYPE_MODE (inner_type
);
9060 if (modifier
== EXPAND_INITIALIZER
)
9061 op0
= lowpart_subreg (mode
, op0
, inner_mode
);
9063 op0
= convert_modes (mode
, inner_mode
, op0
,
9064 TYPE_UNSIGNED (inner_type
));
9067 else if (modifier
== EXPAND_INITIALIZER
)
9068 op0
= gen_rtx_fmt_e (TYPE_UNSIGNED (TREE_TYPE (treeop0
))
9069 ? ZERO_EXTEND
: SIGN_EXTEND
, mode
, op0
);
9071 else if (target
== 0)
9072 op0
= convert_to_mode (mode
, op0
,
9073 TYPE_UNSIGNED (TREE_TYPE
9077 convert_move (target
, op0
,
9078 TYPE_UNSIGNED (TREE_TYPE (treeop0
)));
9082 return REDUCE_BIT_FIELD (op0
);
9084 case ADDR_SPACE_CONVERT_EXPR
:
9086 tree treeop0_type
= TREE_TYPE (treeop0
);
9088 gcc_assert (POINTER_TYPE_P (type
));
9089 gcc_assert (POINTER_TYPE_P (treeop0_type
));
9091 addr_space_t as_to
= TYPE_ADDR_SPACE (TREE_TYPE (type
));
9092 addr_space_t as_from
= TYPE_ADDR_SPACE (TREE_TYPE (treeop0_type
));
9094 /* Conversions between pointers to the same address space should
9095 have been implemented via CONVERT_EXPR / NOP_EXPR. */
9096 gcc_assert (as_to
!= as_from
);
9098 op0
= expand_expr (treeop0
, NULL_RTX
, VOIDmode
, modifier
);
9100 /* Ask target code to handle conversion between pointers
9101 to overlapping address spaces. */
9102 if (targetm
.addr_space
.subset_p (as_to
, as_from
)
9103 || targetm
.addr_space
.subset_p (as_from
, as_to
))
9105 op0
= targetm
.addr_space
.convert (op0
, treeop0_type
, type
);
9109 /* For disjoint address spaces, converting anything but a null
9110 pointer invokes undefined behavior. We truncate or extend the
9111 value as if we'd converted via integers, which handles 0 as
9112 required, and all others as the programmer likely expects. */
9113 #ifndef POINTERS_EXTEND_UNSIGNED
9114 const int POINTERS_EXTEND_UNSIGNED
= 1;
9116 op0
= convert_modes (mode
, TYPE_MODE (treeop0_type
),
9117 op0
, POINTERS_EXTEND_UNSIGNED
);
9123 case POINTER_PLUS_EXPR
:
9124 /* Even though the sizetype mode and the pointer's mode can be different
9125 expand is able to handle this correctly and get the correct result out
9126 of the PLUS_EXPR code. */
9127 /* Make sure to sign-extend the sizetype offset in a POINTER_PLUS_EXPR
9128 if sizetype precision is smaller than pointer precision. */
9129 if (TYPE_PRECISION (sizetype
) < TYPE_PRECISION (type
))
9130 treeop1
= fold_convert_loc (loc
, type
,
9131 fold_convert_loc (loc
, ssizetype
,
9133 /* If sizetype precision is larger than pointer precision, truncate the
9134 offset to have matching modes. */
9135 else if (TYPE_PRECISION (sizetype
) > TYPE_PRECISION (type
))
9136 treeop1
= fold_convert_loc (loc
, type
, treeop1
);
9140 /* If we are adding a constant, a VAR_DECL that is sp, fp, or ap, and
9141 something else, make sure we add the register to the constant and
9142 then to the other thing. This case can occur during strength
9143 reduction and doing it this way will produce better code if the
9144 frame pointer or argument pointer is eliminated.
9146 fold-const.c will ensure that the constant is always in the inner
9147 PLUS_EXPR, so the only case we need to do anything about is if
9148 sp, ap, or fp is our second argument, in which case we must swap
9149 the innermost first argument and our second argument. */
9151 if (TREE_CODE (treeop0
) == PLUS_EXPR
9152 && TREE_CODE (TREE_OPERAND (treeop0
, 1)) == INTEGER_CST
9154 && (DECL_RTL (treeop1
) == frame_pointer_rtx
9155 || DECL_RTL (treeop1
) == stack_pointer_rtx
9156 || DECL_RTL (treeop1
) == arg_pointer_rtx
))
9161 /* If the result is to be ptr_mode and we are adding an integer to
9162 something, we might be forming a constant. So try to use
9163 plus_constant. If it produces a sum and we can't accept it,
9164 use force_operand. This allows P = &ARR[const] to generate
9165 efficient code on machines where a SYMBOL_REF is not a valid
9168 If this is an EXPAND_SUM call, always return the sum. */
9169 if (modifier
== EXPAND_SUM
|| modifier
== EXPAND_INITIALIZER
9170 || (mode
== ptr_mode
&& (unsignedp
|| ! flag_trapv
)))
9172 if (modifier
== EXPAND_STACK_PARM
)
9174 if (TREE_CODE (treeop0
) == INTEGER_CST
9175 && HWI_COMPUTABLE_MODE_P (mode
)
9176 && TREE_CONSTANT (treeop1
))
9180 machine_mode wmode
= TYPE_MODE (TREE_TYPE (treeop1
));
9182 op1
= expand_expr (treeop1
, subtarget
, VOIDmode
,
9184 /* Use wi::shwi to ensure that the constant is
9185 truncated according to the mode of OP1, then sign extended
9186 to a HOST_WIDE_INT. Using the constant directly can result
9187 in non-canonical RTL in a 64x32 cross compile. */
9188 wc
= TREE_INT_CST_LOW (treeop0
);
9190 immed_wide_int_const (wi::shwi (wc
, wmode
), wmode
);
9191 op1
= plus_constant (mode
, op1
, INTVAL (constant_part
));
9192 if (modifier
!= EXPAND_SUM
&& modifier
!= EXPAND_INITIALIZER
)
9193 op1
= force_operand (op1
, target
);
9194 return REDUCE_BIT_FIELD (op1
);
9197 else if (TREE_CODE (treeop1
) == INTEGER_CST
9198 && HWI_COMPUTABLE_MODE_P (mode
)
9199 && TREE_CONSTANT (treeop0
))
9203 machine_mode wmode
= TYPE_MODE (TREE_TYPE (treeop0
));
9205 op0
= expand_expr (treeop0
, subtarget
, VOIDmode
,
9206 (modifier
== EXPAND_INITIALIZER
9207 ? EXPAND_INITIALIZER
: EXPAND_SUM
));
9208 if (! CONSTANT_P (op0
))
9210 op1
= expand_expr (treeop1
, NULL_RTX
,
9211 VOIDmode
, modifier
);
9212 /* Return a PLUS if modifier says it's OK. */
9213 if (modifier
== EXPAND_SUM
9214 || modifier
== EXPAND_INITIALIZER
)
9215 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
9218 /* Use wi::shwi to ensure that the constant is
9219 truncated according to the mode of OP1, then sign extended
9220 to a HOST_WIDE_INT. Using the constant directly can result
9221 in non-canonical RTL in a 64x32 cross compile. */
9222 wc
= TREE_INT_CST_LOW (treeop1
);
9224 = immed_wide_int_const (wi::shwi (wc
, wmode
), wmode
);
9225 op0
= plus_constant (mode
, op0
, INTVAL (constant_part
));
9226 if (modifier
!= EXPAND_SUM
&& modifier
!= EXPAND_INITIALIZER
)
9227 op0
= force_operand (op0
, target
);
9228 return REDUCE_BIT_FIELD (op0
);
9232 /* Use TER to expand pointer addition of a negated value
9233 as pointer subtraction. */
9234 if ((POINTER_TYPE_P (TREE_TYPE (treeop0
))
9235 || (TREE_CODE (TREE_TYPE (treeop0
)) == VECTOR_TYPE
9236 && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (treeop0
)))))
9237 && TREE_CODE (treeop1
) == SSA_NAME
9238 && TYPE_MODE (TREE_TYPE (treeop0
))
9239 == TYPE_MODE (TREE_TYPE (treeop1
)))
9241 gimple
*def
= get_def_for_expr (treeop1
, NEGATE_EXPR
);
9244 treeop1
= gimple_assign_rhs1 (def
);
9250 /* No sense saving up arithmetic to be done
9251 if it's all in the wrong mode to form part of an address.
9252 And force_operand won't know whether to sign-extend or
9254 if (modifier
!= EXPAND_INITIALIZER
9255 && (modifier
!= EXPAND_SUM
|| mode
!= ptr_mode
))
9257 expand_operands (treeop0
, treeop1
,
9258 subtarget
, &op0
, &op1
, modifier
);
9259 if (op0
== const0_rtx
)
9261 if (op1
== const0_rtx
)
9266 expand_operands (treeop0
, treeop1
,
9267 subtarget
, &op0
, &op1
, modifier
);
9268 return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS
, mode
, op0
, op1
));
9271 case POINTER_DIFF_EXPR
:
9273 /* For initializers, we are allowed to return a MINUS of two
9274 symbolic constants. Here we handle all cases when both operands
9276 /* Handle difference of two symbolic constants,
9277 for the sake of an initializer. */
9278 if ((modifier
== EXPAND_SUM
|| modifier
== EXPAND_INITIALIZER
)
9279 && really_constant_p (treeop0
)
9280 && really_constant_p (treeop1
))
9282 expand_operands (treeop0
, treeop1
,
9283 NULL_RTX
, &op0
, &op1
, modifier
);
9284 return simplify_gen_binary (MINUS
, mode
, op0
, op1
);
9287 /* No sense saving up arithmetic to be done
9288 if it's all in the wrong mode to form part of an address.
9289 And force_operand won't know whether to sign-extend or
9291 if (modifier
!= EXPAND_INITIALIZER
9292 && (modifier
!= EXPAND_SUM
|| mode
!= ptr_mode
))
9295 expand_operands (treeop0
, treeop1
,
9296 subtarget
, &op0
, &op1
, modifier
);
9298 /* Convert A - const to A + (-const). */
9299 if (CONST_INT_P (op1
))
9301 op1
= negate_rtx (mode
, op1
);
9302 return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS
, mode
, op0
, op1
));
9307 case WIDEN_MULT_PLUS_EXPR
:
9308 case WIDEN_MULT_MINUS_EXPR
:
9309 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
9310 op2
= expand_normal (treeop2
);
9311 target
= expand_widen_pattern_expr (ops
, op0
, op1
, op2
,
9315 case WIDEN_PLUS_EXPR
:
9316 case WIDEN_MINUS_EXPR
:
9317 case WIDEN_MULT_EXPR
:
9318 /* If first operand is constant, swap them.
9319 Thus the following special case checks need only
9320 check the second operand. */
9321 if (TREE_CODE (treeop0
) == INTEGER_CST
)
9322 std::swap (treeop0
, treeop1
);
9324 /* First, check if we have a multiplication of one signed and one
9325 unsigned operand. */
9326 if (TREE_CODE (treeop1
) != INTEGER_CST
9327 && (TYPE_UNSIGNED (TREE_TYPE (treeop0
))
9328 != TYPE_UNSIGNED (TREE_TYPE (treeop1
))))
9330 machine_mode innermode
= TYPE_MODE (TREE_TYPE (treeop0
));
9331 this_optab
= usmul_widen_optab
;
9332 if (find_widening_optab_handler (this_optab
, mode
, innermode
)
9333 != CODE_FOR_nothing
)
9335 if (TYPE_UNSIGNED (TREE_TYPE (treeop0
)))
9336 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op0
, &op1
,
9339 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op1
, &op0
,
9341 /* op0 and op1 might still be constant, despite the above
9342 != INTEGER_CST check. Handle it. */
9343 if (GET_MODE (op0
) == VOIDmode
&& GET_MODE (op1
) == VOIDmode
)
9345 op0
= convert_modes (mode
, innermode
, op0
, true);
9346 op1
= convert_modes (mode
, innermode
, op1
, false);
9347 return REDUCE_BIT_FIELD (expand_mult (mode
, op0
, op1
,
9348 target
, unsignedp
));
9353 /* Check for a multiplication with matching signedness. */
9354 else if ((TREE_CODE (treeop1
) == INTEGER_CST
9355 && int_fits_type_p (treeop1
, TREE_TYPE (treeop0
)))
9356 || (TYPE_UNSIGNED (TREE_TYPE (treeop1
))
9357 == TYPE_UNSIGNED (TREE_TYPE (treeop0
))))
9359 tree op0type
= TREE_TYPE (treeop0
);
9360 machine_mode innermode
= TYPE_MODE (op0type
);
9361 bool zextend_p
= TYPE_UNSIGNED (op0type
);
9362 optab other_optab
= zextend_p
? smul_widen_optab
: umul_widen_optab
;
9363 this_optab
= zextend_p
? umul_widen_optab
: smul_widen_optab
;
9365 if (TREE_CODE (treeop0
) != INTEGER_CST
)
9367 if (find_widening_optab_handler (this_optab
, mode
, innermode
)
9368 != CODE_FOR_nothing
)
9370 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op0
, &op1
,
9372 /* op0 and op1 might still be constant, despite the above
9373 != INTEGER_CST check. Handle it. */
9374 if (GET_MODE (op0
) == VOIDmode
&& GET_MODE (op1
) == VOIDmode
)
9377 op0
= convert_modes (mode
, innermode
, op0
, zextend_p
);
9379 = convert_modes (mode
, innermode
, op1
,
9380 TYPE_UNSIGNED (TREE_TYPE (treeop1
)));
9381 return REDUCE_BIT_FIELD (expand_mult (mode
, op0
, op1
,
9385 temp
= expand_widening_mult (mode
, op0
, op1
, target
,
9386 unsignedp
, this_optab
);
9387 return REDUCE_BIT_FIELD (temp
);
9389 if (find_widening_optab_handler (other_optab
, mode
, innermode
)
9391 && innermode
== word_mode
)
9394 op0
= expand_normal (treeop0
);
9395 op1
= expand_normal (treeop1
);
9396 /* op0 and op1 might be constants, despite the above
9397 != INTEGER_CST check. Handle it. */
9398 if (GET_MODE (op0
) == VOIDmode
&& GET_MODE (op1
) == VOIDmode
)
9399 goto widen_mult_const
;
9400 temp
= expand_binop (mode
, other_optab
, op0
, op1
, target
,
9401 unsignedp
, OPTAB_LIB_WIDEN
);
9402 hipart
= gen_highpart (word_mode
, temp
);
9403 htem
= expand_mult_highpart_adjust (word_mode
, hipart
,
9407 emit_move_insn (hipart
, htem
);
9408 return REDUCE_BIT_FIELD (temp
);
9412 treeop0
= fold_build1 (CONVERT_EXPR
, type
, treeop0
);
9413 treeop1
= fold_build1 (CONVERT_EXPR
, type
, treeop1
);
9414 expand_operands (treeop0
, treeop1
, subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
9415 return REDUCE_BIT_FIELD (expand_mult (mode
, op0
, op1
, target
, unsignedp
));
9418 /* If this is a fixed-point operation, then we cannot use the code
9419 below because "expand_mult" doesn't support sat/no-sat fixed-point
9421 if (ALL_FIXED_POINT_MODE_P (mode
))
9424 /* If first operand is constant, swap them.
9425 Thus the following special case checks need only
9426 check the second operand. */
9427 if (TREE_CODE (treeop0
) == INTEGER_CST
)
9428 std::swap (treeop0
, treeop1
);
9430 /* Attempt to return something suitable for generating an
9431 indexed address, for machines that support that. */
9433 if (modifier
== EXPAND_SUM
&& mode
== ptr_mode
9434 && tree_fits_shwi_p (treeop1
))
9436 tree exp1
= treeop1
;
9438 op0
= expand_expr (treeop0
, subtarget
, VOIDmode
,
9442 op0
= force_operand (op0
, NULL_RTX
);
9444 op0
= copy_to_mode_reg (mode
, op0
);
9446 op1
= gen_int_mode (tree_to_shwi (exp1
),
9447 TYPE_MODE (TREE_TYPE (exp1
)));
9448 return REDUCE_BIT_FIELD (gen_rtx_MULT (mode
, op0
, op1
));
9451 if (modifier
== EXPAND_STACK_PARM
)
9454 if (SCALAR_INT_MODE_P (mode
) && optimize
>= 2)
9456 gimple
*def_stmt0
= get_def_for_expr (treeop0
, TRUNC_DIV_EXPR
);
9457 gimple
*def_stmt1
= get_def_for_expr (treeop1
, TRUNC_DIV_EXPR
);
9459 && !operand_equal_p (treeop1
, gimple_assign_rhs2 (def_stmt0
), 0))
9462 && !operand_equal_p (treeop0
, gimple_assign_rhs2 (def_stmt1
), 0))
9465 if (def_stmt0
|| def_stmt1
)
9467 /* X / Y * Y can be expanded as X - X % Y too.
9468 Choose the cheaper sequence of those two. */
9470 treeop0
= gimple_assign_rhs1 (def_stmt0
);
9474 treeop0
= gimple_assign_rhs1 (def_stmt1
);
9476 expand_operands (treeop0
, treeop1
, subtarget
, &op0
, &op1
,
9478 bool speed_p
= optimize_insn_for_speed_p ();
9479 do_pending_stack_adjust ();
9482 = expand_expr_divmod (TRUNC_DIV_EXPR
, mode
, treeop0
, treeop1
,
9483 op0
, op1
, NULL_RTX
, unsignedp
);
9484 divmul_ret
= expand_mult (mode
, divmul_ret
, op1
, target
,
9486 rtx_insn
*divmul_insns
= get_insns ();
9490 = expand_expr_divmod (TRUNC_MOD_EXPR
, mode
, treeop0
, treeop1
,
9491 op0
, op1
, NULL_RTX
, unsignedp
);
9492 this_optab
= optab_for_tree_code (MINUS_EXPR
, type
,
9494 modsub_ret
= expand_binop (mode
, this_optab
, op0
, modsub_ret
,
9495 target
, unsignedp
, OPTAB_LIB_WIDEN
);
9496 rtx_insn
*modsub_insns
= get_insns ();
9498 unsigned divmul_cost
= seq_cost (divmul_insns
, speed_p
);
9499 unsigned modsub_cost
= seq_cost (modsub_insns
, speed_p
);
9500 /* If costs are the same then use as tie breaker the other
9502 if (divmul_cost
== modsub_cost
)
9504 divmul_cost
= seq_cost (divmul_insns
, !speed_p
);
9505 modsub_cost
= seq_cost (modsub_insns
, !speed_p
);
9508 if (divmul_cost
<= modsub_cost
)
9510 emit_insn (divmul_insns
);
9511 return REDUCE_BIT_FIELD (divmul_ret
);
9513 emit_insn (modsub_insns
);
9514 return REDUCE_BIT_FIELD (modsub_ret
);
9518 expand_operands (treeop0
, treeop1
, subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
9519 return REDUCE_BIT_FIELD (expand_mult (mode
, op0
, op1
, target
, unsignedp
));
9521 case TRUNC_MOD_EXPR
:
9522 case FLOOR_MOD_EXPR
:
9524 case ROUND_MOD_EXPR
:
9526 case TRUNC_DIV_EXPR
:
9527 case FLOOR_DIV_EXPR
:
9529 case ROUND_DIV_EXPR
:
9530 case EXACT_DIV_EXPR
:
9531 /* If this is a fixed-point operation, then we cannot use the code
9532 below because "expand_divmod" doesn't support sat/no-sat fixed-point
9534 if (ALL_FIXED_POINT_MODE_P (mode
))
9537 if (modifier
== EXPAND_STACK_PARM
)
9539 /* Possible optimization: compute the dividend with EXPAND_SUM
9540 then if the divisor is constant can optimize the case
9541 where some terms of the dividend have coeffs divisible by it. */
9542 expand_operands (treeop0
, treeop1
, subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
9543 return expand_expr_divmod (code
, mode
, treeop0
, treeop1
, op0
, op1
,
9549 case MULT_HIGHPART_EXPR
:
9550 expand_operands (treeop0
, treeop1
, subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
9551 temp
= expand_mult_highpart (mode
, op0
, op1
, target
, unsignedp
);
9555 case FIXED_CONVERT_EXPR
:
9556 op0
= expand_normal (treeop0
);
9557 if (target
== 0 || modifier
== EXPAND_STACK_PARM
)
9558 target
= gen_reg_rtx (mode
);
9560 if ((TREE_CODE (TREE_TYPE (treeop0
)) == INTEGER_TYPE
9561 && TYPE_UNSIGNED (TREE_TYPE (treeop0
)))
9562 || (TREE_CODE (type
) == INTEGER_TYPE
&& TYPE_UNSIGNED (type
)))
9563 expand_fixed_convert (target
, op0
, 1, TYPE_SATURATING (type
));
9565 expand_fixed_convert (target
, op0
, 0, TYPE_SATURATING (type
));
9568 case FIX_TRUNC_EXPR
:
9569 op0
= expand_normal (treeop0
);
9570 if (target
== 0 || modifier
== EXPAND_STACK_PARM
)
9571 target
= gen_reg_rtx (mode
);
9572 expand_fix (target
, op0
, unsignedp
);
9576 op0
= expand_normal (treeop0
);
9577 if (target
== 0 || modifier
== EXPAND_STACK_PARM
)
9578 target
= gen_reg_rtx (mode
);
9579 /* expand_float can't figure out what to do if FROM has VOIDmode.
9580 So give it the correct mode. With -O, cse will optimize this. */
9581 if (GET_MODE (op0
) == VOIDmode
)
9582 op0
= copy_to_mode_reg (TYPE_MODE (TREE_TYPE (treeop0
)),
9584 expand_float (target
, op0
,
9585 TYPE_UNSIGNED (TREE_TYPE (treeop0
)));
9589 op0
= expand_expr (treeop0
, subtarget
,
9590 VOIDmode
, EXPAND_NORMAL
);
9591 if (modifier
== EXPAND_STACK_PARM
)
9593 temp
= expand_unop (mode
,
9594 optab_for_tree_code (NEGATE_EXPR
, type
,
9598 return REDUCE_BIT_FIELD (temp
);
9602 op0
= expand_expr (treeop0
, subtarget
,
9603 VOIDmode
, EXPAND_NORMAL
);
9604 if (modifier
== EXPAND_STACK_PARM
)
9607 /* ABS_EXPR is not valid for complex arguments. */
9608 gcc_assert (GET_MODE_CLASS (mode
) != MODE_COMPLEX_INT
9609 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_FLOAT
);
9611 /* Unsigned abs is simply the operand. Testing here means we don't
9612 risk generating incorrect code below. */
9613 if (TYPE_UNSIGNED (TREE_TYPE (treeop0
)))
9616 return expand_abs (mode
, op0
, target
, unsignedp
,
9617 safe_from_p (target
, treeop0
, 1));
9621 target
= original_target
;
9623 || modifier
== EXPAND_STACK_PARM
9624 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
9625 || GET_MODE (target
) != mode
9627 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
9628 target
= gen_reg_rtx (mode
);
9629 expand_operands (treeop0
, treeop1
,
9630 target
, &op0
, &op1
, EXPAND_NORMAL
);
9632 /* First try to do it with a special MIN or MAX instruction.
9633 If that does not win, use a conditional jump to select the proper
9635 this_optab
= optab_for_tree_code (code
, type
, optab_default
);
9636 temp
= expand_binop (mode
, this_optab
, op0
, op1
, target
, unsignedp
,
9641 if (VECTOR_TYPE_P (type
))
9644 /* At this point, a MEM target is no longer useful; we will get better
9647 if (! REG_P (target
))
9648 target
= gen_reg_rtx (mode
);
9650 /* If op1 was placed in target, swap op0 and op1. */
9651 if (target
!= op0
&& target
== op1
)
9652 std::swap (op0
, op1
);
9654 /* We generate better code and avoid problems with op1 mentioning
9655 target by forcing op1 into a pseudo if it isn't a constant. */
9656 if (! CONSTANT_P (op1
))
9657 op1
= force_reg (mode
, op1
);
9660 enum rtx_code comparison_code
;
9663 if (code
== MAX_EXPR
)
9664 comparison_code
= unsignedp
? GEU
: GE
;
9666 comparison_code
= unsignedp
? LEU
: LE
;
9668 /* Canonicalize to comparisons against 0. */
9669 if (op1
== const1_rtx
)
9671 /* Converting (a >= 1 ? a : 1) into (a > 0 ? a : 1)
9672 or (a != 0 ? a : 1) for unsigned.
9673 For MIN we are safe converting (a <= 1 ? a : 1)
9674 into (a <= 0 ? a : 1) */
9675 cmpop1
= const0_rtx
;
9676 if (code
== MAX_EXPR
)
9677 comparison_code
= unsignedp
? NE
: GT
;
9679 if (op1
== constm1_rtx
&& !unsignedp
)
9681 /* Converting (a >= -1 ? a : -1) into (a >= 0 ? a : -1)
9682 and (a <= -1 ? a : -1) into (a < 0 ? a : -1) */
9683 cmpop1
= const0_rtx
;
9684 if (code
== MIN_EXPR
)
9685 comparison_code
= LT
;
9688 /* Use a conditional move if possible. */
9689 if (can_conditionally_move_p (mode
))
9695 /* Try to emit the conditional move. */
9696 insn
= emit_conditional_move (target
, comparison_code
,
9701 /* If we could do the conditional move, emit the sequence,
9705 rtx_insn
*seq
= get_insns ();
9711 /* Otherwise discard the sequence and fall back to code with
9717 emit_move_insn (target
, op0
);
9719 lab
= gen_label_rtx ();
9720 do_compare_rtx_and_jump (target
, cmpop1
, comparison_code
,
9721 unsignedp
, mode
, NULL_RTX
, NULL
, lab
,
9722 profile_probability::uninitialized ());
9724 emit_move_insn (target
, op1
);
9729 op0
= expand_expr (treeop0
, subtarget
,
9730 VOIDmode
, EXPAND_NORMAL
);
9731 if (modifier
== EXPAND_STACK_PARM
)
9733 /* In case we have to reduce the result to bitfield precision
9734 for unsigned bitfield expand this as XOR with a proper constant
9736 if (reduce_bit_field
&& TYPE_UNSIGNED (type
))
9738 int_mode
= SCALAR_INT_TYPE_MODE (type
);
9739 wide_int mask
= wi::mask (TYPE_PRECISION (type
),
9740 false, GET_MODE_PRECISION (int_mode
));
9742 temp
= expand_binop (int_mode
, xor_optab
, op0
,
9743 immed_wide_int_const (mask
, int_mode
),
9744 target
, 1, OPTAB_LIB_WIDEN
);
9747 temp
= expand_unop (mode
, one_cmpl_optab
, op0
, target
, 1);
9751 /* ??? Can optimize bitwise operations with one arg constant.
9752 Can optimize (a bitwise1 n) bitwise2 (a bitwise3 b)
9753 and (a bitwise1 b) bitwise2 b (etc)
9754 but that is probably not worth while. */
9763 gcc_assert (VECTOR_MODE_P (TYPE_MODE (type
))
9764 || type_has_mode_precision_p (type
));
9770 /* If this is a fixed-point operation, then we cannot use the code
9771 below because "expand_shift" doesn't support sat/no-sat fixed-point
9773 if (ALL_FIXED_POINT_MODE_P (mode
))
9776 if (! safe_from_p (subtarget
, treeop1
, 1))
9778 if (modifier
== EXPAND_STACK_PARM
)
9780 op0
= expand_expr (treeop0
, subtarget
,
9781 VOIDmode
, EXPAND_NORMAL
);
9783 /* Left shift optimization when shifting across word_size boundary.
9785 If mode == GET_MODE_WIDER_MODE (word_mode), then normally
9786 there isn't native instruction to support this wide mode
9787 left shift. Given below scenario:
9789 Type A = (Type) B << C
9792 | dest_high | dest_low |
9796 If the shift amount C caused we shift B to across the word
9797 size boundary, i.e part of B shifted into high half of
9798 destination register, and part of B remains in the low
9799 half, then GCC will use the following left shift expand
9802 1. Initialize dest_low to B.
9803 2. Initialize every bit of dest_high to the sign bit of B.
9804 3. Logic left shift dest_low by C bit to finalize dest_low.
9805 The value of dest_low before this shift is kept in a temp D.
9806 4. Logic left shift dest_high by C.
9807 5. Logic right shift D by (word_size - C).
9808 6. Or the result of 4 and 5 to finalize dest_high.
9810 While, by checking gimple statements, if operand B is
9811 coming from signed extension, then we can simplify above
9814 1. dest_high = src_low >> (word_size - C).
9815 2. dest_low = src_low << C.
9817 We can use one arithmetic right shift to finish all the
9818 purpose of steps 2, 4, 5, 6, thus we reduce the steps
9819 needed from 6 into 2.
9821 The case is similar for zero extension, except that we
9822 initialize dest_high to zero rather than copies of the sign
9823 bit from B. Furthermore, we need to use a logical right shift
9826 The choice of sign-extension versus zero-extension is
9827 determined entirely by whether or not B is signed and is
9828 independent of the current setting of unsignedp. */
9831 if (code
== LSHIFT_EXPR
9834 && GET_MODE_2XWIDER_MODE (word_mode
).exists (&int_mode
)
9836 && TREE_CONSTANT (treeop1
)
9837 && TREE_CODE (treeop0
) == SSA_NAME
)
9839 gimple
*def
= SSA_NAME_DEF_STMT (treeop0
);
9840 if (is_gimple_assign (def
)
9841 && gimple_assign_rhs_code (def
) == NOP_EXPR
)
9843 scalar_int_mode rmode
= SCALAR_INT_TYPE_MODE
9844 (TREE_TYPE (gimple_assign_rhs1 (def
)));
9846 if (GET_MODE_SIZE (rmode
) < GET_MODE_SIZE (int_mode
)
9847 && TREE_INT_CST_LOW (treeop1
) < GET_MODE_BITSIZE (word_mode
)
9848 && ((TREE_INT_CST_LOW (treeop1
) + GET_MODE_BITSIZE (rmode
))
9849 >= GET_MODE_BITSIZE (word_mode
)))
9851 rtx_insn
*seq
, *seq_old
;
9852 poly_uint64 high_off
= subreg_highpart_offset (word_mode
,
9854 bool extend_unsigned
9855 = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def
)));
9856 rtx low
= lowpart_subreg (word_mode
, op0
, int_mode
);
9857 rtx dest_low
= lowpart_subreg (word_mode
, target
, int_mode
);
9858 rtx dest_high
= simplify_gen_subreg (word_mode
, target
,
9859 int_mode
, high_off
);
9860 HOST_WIDE_INT ramount
= (BITS_PER_WORD
9861 - TREE_INT_CST_LOW (treeop1
));
9862 tree rshift
= build_int_cst (TREE_TYPE (treeop1
), ramount
);
9865 /* dest_high = src_low >> (word_size - C). */
9866 temp
= expand_variable_shift (RSHIFT_EXPR
, word_mode
, low
,
9869 if (temp
!= dest_high
)
9870 emit_move_insn (dest_high
, temp
);
9872 /* dest_low = src_low << C. */
9873 temp
= expand_variable_shift (LSHIFT_EXPR
, word_mode
, low
,
9874 treeop1
, dest_low
, unsignedp
);
9875 if (temp
!= dest_low
)
9876 emit_move_insn (dest_low
, temp
);
9882 if (have_insn_for (ASHIFT
, int_mode
))
9884 bool speed_p
= optimize_insn_for_speed_p ();
9886 rtx ret_old
= expand_variable_shift (code
, int_mode
,
9891 seq_old
= get_insns ();
9893 if (seq_cost (seq
, speed_p
)
9894 >= seq_cost (seq_old
, speed_p
))
9905 if (temp
== NULL_RTX
)
9906 temp
= expand_variable_shift (code
, mode
, op0
, treeop1
, target
,
9908 if (code
== LSHIFT_EXPR
)
9909 temp
= REDUCE_BIT_FIELD (temp
);
9913 /* Could determine the answer when only additive constants differ. Also,
9914 the addition of one can be handled by changing the condition. */
9921 case UNORDERED_EXPR
:
9930 temp
= do_store_flag (ops
,
9931 modifier
!= EXPAND_STACK_PARM
? target
: NULL_RTX
,
9932 tmode
!= VOIDmode
? tmode
: mode
);
9936 /* Use a compare and a jump for BLKmode comparisons, or for function
9937 type comparisons if have_canonicalize_funcptr_for_compare.
9940 || modifier
== EXPAND_STACK_PARM
9941 || ! safe_from_p (target
, treeop0
, 1)
9942 || ! safe_from_p (target
, treeop1
, 1)
9943 /* Make sure we don't have a hard reg (such as function's return
9944 value) live across basic blocks, if not optimizing. */
9945 || (!optimize
&& REG_P (target
)
9946 && REGNO (target
) < FIRST_PSEUDO_REGISTER
)))
9947 target
= gen_reg_rtx (tmode
!= VOIDmode
? tmode
: mode
);
9949 emit_move_insn (target
, const0_rtx
);
9951 rtx_code_label
*lab1
= gen_label_rtx ();
9952 jumpifnot_1 (code
, treeop0
, treeop1
, lab1
,
9953 profile_probability::uninitialized ());
9955 if (TYPE_PRECISION (type
) == 1 && !TYPE_UNSIGNED (type
))
9956 emit_move_insn (target
, constm1_rtx
);
9958 emit_move_insn (target
, const1_rtx
);
9964 /* Get the rtx code of the operands. */
9965 op0
= expand_normal (treeop0
);
9966 op1
= expand_normal (treeop1
);
9969 target
= gen_reg_rtx (TYPE_MODE (type
));
9971 /* If target overlaps with op1, then either we need to force
9972 op1 into a pseudo (if target also overlaps with op0),
9973 or write the complex parts in reverse order. */
9974 switch (GET_CODE (target
))
9977 if (reg_overlap_mentioned_p (XEXP (target
, 0), op1
))
9979 if (reg_overlap_mentioned_p (XEXP (target
, 1), op0
))
9981 complex_expr_force_op1
:
9982 temp
= gen_reg_rtx (GET_MODE_INNER (GET_MODE (target
)));
9983 emit_move_insn (temp
, op1
);
9987 complex_expr_swap_order
:
9988 /* Move the imaginary (op1) and real (op0) parts to their
9990 write_complex_part (target
, op1
, true);
9991 write_complex_part (target
, op0
, false);
9997 temp
= adjust_address_nv (target
,
9998 GET_MODE_INNER (GET_MODE (target
)), 0);
9999 if (reg_overlap_mentioned_p (temp
, op1
))
10001 scalar_mode imode
= GET_MODE_INNER (GET_MODE (target
));
10002 temp
= adjust_address_nv (target
, imode
,
10003 GET_MODE_SIZE (imode
));
10004 if (reg_overlap_mentioned_p (temp
, op0
))
10005 goto complex_expr_force_op1
;
10006 goto complex_expr_swap_order
;
10010 if (reg_overlap_mentioned_p (target
, op1
))
10012 if (reg_overlap_mentioned_p (target
, op0
))
10013 goto complex_expr_force_op1
;
10014 goto complex_expr_swap_order
;
10019 /* Move the real (op0) and imaginary (op1) parts to their location. */
10020 write_complex_part (target
, op0
, false);
10021 write_complex_part (target
, op1
, true);
10025 case WIDEN_SUM_EXPR
:
10027 tree oprnd0
= treeop0
;
10028 tree oprnd1
= treeop1
;
10030 expand_operands (oprnd0
, oprnd1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
10031 target
= expand_widen_pattern_expr (ops
, op0
, NULL_RTX
, op1
,
10032 target
, unsignedp
);
10036 case VEC_UNPACK_HI_EXPR
:
10037 case VEC_UNPACK_LO_EXPR
:
10038 case VEC_UNPACK_FIX_TRUNC_HI_EXPR
:
10039 case VEC_UNPACK_FIX_TRUNC_LO_EXPR
:
10041 op0
= expand_normal (treeop0
);
10042 temp
= expand_widen_pattern_expr (ops
, op0
, NULL_RTX
, NULL_RTX
,
10043 target
, unsignedp
);
10048 case VEC_UNPACK_FLOAT_HI_EXPR
:
10049 case VEC_UNPACK_FLOAT_LO_EXPR
:
10051 op0
= expand_normal (treeop0
);
10052 /* The signedness is determined from input operand. */
10053 temp
= expand_widen_pattern_expr
10054 (ops
, op0
, NULL_RTX
, NULL_RTX
,
10055 target
, TYPE_UNSIGNED (TREE_TYPE (treeop0
)));
10061 case VEC_WIDEN_PLUS_HI_EXPR
:
10062 case VEC_WIDEN_PLUS_LO_EXPR
:
10063 case VEC_WIDEN_MINUS_HI_EXPR
:
10064 case VEC_WIDEN_MINUS_LO_EXPR
:
10065 case VEC_WIDEN_MULT_HI_EXPR
:
10066 case VEC_WIDEN_MULT_LO_EXPR
:
10067 case VEC_WIDEN_MULT_EVEN_EXPR
:
10068 case VEC_WIDEN_MULT_ODD_EXPR
:
10069 case VEC_WIDEN_LSHIFT_HI_EXPR
:
10070 case VEC_WIDEN_LSHIFT_LO_EXPR
:
10071 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
10072 target
= expand_widen_pattern_expr (ops
, op0
, op1
, NULL_RTX
,
10073 target
, unsignedp
);
10074 gcc_assert (target
);
10077 case VEC_PACK_SAT_EXPR
:
10078 case VEC_PACK_FIX_TRUNC_EXPR
:
10079 mode
= TYPE_MODE (TREE_TYPE (treeop0
));
10080 subtarget
= NULL_RTX
;
10083 case VEC_PACK_TRUNC_EXPR
:
10084 if (VECTOR_BOOLEAN_TYPE_P (type
)
10085 && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (treeop0
))
10086 && mode
== TYPE_MODE (TREE_TYPE (treeop0
))
10087 && SCALAR_INT_MODE_P (mode
))
10089 class expand_operand eops
[4];
10090 machine_mode imode
= TYPE_MODE (TREE_TYPE (treeop0
));
10091 expand_operands (treeop0
, treeop1
,
10092 subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
10093 this_optab
= vec_pack_sbool_trunc_optab
;
10094 enum insn_code icode
= optab_handler (this_optab
, imode
);
10095 create_output_operand (&eops
[0], target
, mode
);
10096 create_convert_operand_from (&eops
[1], op0
, imode
, false);
10097 create_convert_operand_from (&eops
[2], op1
, imode
, false);
10098 temp
= GEN_INT (TYPE_VECTOR_SUBPARTS (type
).to_constant ());
10099 create_input_operand (&eops
[3], temp
, imode
);
10100 expand_insn (icode
, 4, eops
);
10101 return eops
[0].value
;
10103 mode
= TYPE_MODE (TREE_TYPE (treeop0
));
10104 subtarget
= NULL_RTX
;
10107 case VEC_PACK_FLOAT_EXPR
:
10108 mode
= TYPE_MODE (TREE_TYPE (treeop0
));
10109 expand_operands (treeop0
, treeop1
,
10110 subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
10111 this_optab
= optab_for_tree_code (code
, TREE_TYPE (treeop0
),
10113 target
= expand_binop (mode
, this_optab
, op0
, op1
, target
,
10114 TYPE_UNSIGNED (TREE_TYPE (treeop0
)),
10116 gcc_assert (target
);
10119 case VEC_PERM_EXPR
:
10121 expand_operands (treeop0
, treeop1
, target
, &op0
, &op1
, EXPAND_NORMAL
);
10122 vec_perm_builder sel
;
10123 if (TREE_CODE (treeop2
) == VECTOR_CST
10124 && tree_to_vec_perm_builder (&sel
, treeop2
))
10126 machine_mode sel_mode
= TYPE_MODE (TREE_TYPE (treeop2
));
10127 temp
= expand_vec_perm_const (mode
, op0
, op1
, sel
,
10132 op2
= expand_normal (treeop2
);
10133 temp
= expand_vec_perm_var (mode
, op0
, op1
, op2
, target
);
10139 case DOT_PROD_EXPR
:
10141 tree oprnd0
= treeop0
;
10142 tree oprnd1
= treeop1
;
10143 tree oprnd2
= treeop2
;
10145 expand_operands (oprnd0
, oprnd1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
10146 op2
= expand_normal (oprnd2
);
10147 target
= expand_widen_pattern_expr (ops
, op0
, op1
, op2
,
10148 target
, unsignedp
);
10154 tree oprnd0
= treeop0
;
10155 tree oprnd1
= treeop1
;
10156 tree oprnd2
= treeop2
;
10158 expand_operands (oprnd0
, oprnd1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
10159 op2
= expand_normal (oprnd2
);
10160 target
= expand_widen_pattern_expr (ops
, op0
, op1
, op2
,
10161 target
, unsignedp
);
10165 case REALIGN_LOAD_EXPR
:
10167 tree oprnd0
= treeop0
;
10168 tree oprnd1
= treeop1
;
10169 tree oprnd2
= treeop2
;
10171 this_optab
= optab_for_tree_code (code
, type
, optab_default
);
10172 expand_operands (oprnd0
, oprnd1
, NULL_RTX
, &op0
, &op1
, EXPAND_NORMAL
);
10173 op2
= expand_normal (oprnd2
);
10174 temp
= expand_ternary_op (mode
, this_optab
, op0
, op1
, op2
,
10175 target
, unsignedp
);
10182 /* A COND_EXPR with its type being VOID_TYPE represents a
10183 conditional jump and is handled in
10184 expand_gimple_cond_expr. */
10185 gcc_assert (!VOID_TYPE_P (type
));
10187 /* Note that COND_EXPRs whose type is a structure or union
10188 are required to be constructed to contain assignments of
10189 a temporary variable, so that we can evaluate them here
10190 for side effect only. If type is void, we must do likewise. */
10192 gcc_assert (!TREE_ADDRESSABLE (type
)
10194 && TREE_TYPE (treeop1
) != void_type_node
10195 && TREE_TYPE (treeop2
) != void_type_node
);
10197 temp
= expand_cond_expr_using_cmove (treeop0
, treeop1
, treeop2
);
10201 /* If we are not to produce a result, we have no target. Otherwise,
10202 if a target was specified use it; it will not be used as an
10203 intermediate target unless it is safe. If no target, use a
10206 if (modifier
!= EXPAND_STACK_PARM
10208 && safe_from_p (original_target
, treeop0
, 1)
10209 && GET_MODE (original_target
) == mode
10210 && !MEM_P (original_target
))
10211 temp
= original_target
;
10213 temp
= assign_temp (type
, 0, 1);
10215 do_pending_stack_adjust ();
10217 rtx_code_label
*lab0
= gen_label_rtx ();
10218 rtx_code_label
*lab1
= gen_label_rtx ();
10219 jumpifnot (treeop0
, lab0
,
10220 profile_probability::uninitialized ());
10221 store_expr (treeop1
, temp
,
10222 modifier
== EXPAND_STACK_PARM
,
10225 emit_jump_insn (targetm
.gen_jump (lab1
));
10228 store_expr (treeop2
, temp
,
10229 modifier
== EXPAND_STACK_PARM
,
10237 case VEC_DUPLICATE_EXPR
:
10238 op0
= expand_expr (treeop0
, NULL_RTX
, VOIDmode
, modifier
);
10239 target
= expand_vector_broadcast (mode
, op0
);
10240 gcc_assert (target
);
10243 case VEC_SERIES_EXPR
:
10244 expand_operands (treeop0
, treeop1
, NULL_RTX
, &op0
, &op1
, modifier
);
10245 return expand_vec_series_expr (mode
, op0
, op1
, target
);
10247 case BIT_INSERT_EXPR
:
10249 unsigned bitpos
= tree_to_uhwi (treeop2
);
10251 if (INTEGRAL_TYPE_P (TREE_TYPE (treeop1
)))
10252 bitsize
= TYPE_PRECISION (TREE_TYPE (treeop1
));
10254 bitsize
= tree_to_uhwi (TYPE_SIZE (TREE_TYPE (treeop1
)));
10255 op0
= expand_normal (treeop0
);
10256 op1
= expand_normal (treeop1
);
10257 rtx dst
= gen_reg_rtx (mode
);
10258 emit_move_insn (dst
, op0
);
10259 store_bit_field (dst
, bitsize
, bitpos
, 0, 0,
10260 TYPE_MODE (TREE_TYPE (treeop1
)), op1
, false);
10265 gcc_unreachable ();
10268 /* Here to do an ordinary binary operator. */
10270 expand_operands (treeop0
, treeop1
,
10271 subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
10273 this_optab
= optab_for_tree_code (code
, type
, optab_default
);
10275 if (modifier
== EXPAND_STACK_PARM
)
10277 temp
= expand_binop (mode
, this_optab
, op0
, op1
, target
,
10278 unsignedp
, OPTAB_LIB_WIDEN
);
10280 /* Bitwise operations do not need bitfield reduction as we expect their
10281 operands being properly truncated. */
10282 if (code
== BIT_XOR_EXPR
10283 || code
== BIT_AND_EXPR
10284 || code
== BIT_IOR_EXPR
)
10286 return REDUCE_BIT_FIELD (temp
);
10288 #undef REDUCE_BIT_FIELD
10291 /* Return TRUE if expression STMT is suitable for replacement.
10292 Never consider memory loads as replaceable, because those don't ever lead
10293 into constant expressions. */
10296 stmt_is_replaceable_p (gimple
*stmt
)
10298 if (ssa_is_replaceable_p (stmt
))
10300 /* Don't move around loads. */
10301 if (!gimple_assign_single_p (stmt
)
10302 || is_gimple_val (gimple_assign_rhs1 (stmt
)))
10309 expand_expr_real_1 (tree exp
, rtx target
, machine_mode tmode
,
10310 enum expand_modifier modifier
, rtx
*alt_rtl
,
10311 bool inner_reference_p
)
10313 rtx op0
, op1
, temp
, decl_rtl
;
10316 machine_mode mode
, dmode
;
10317 enum tree_code code
= TREE_CODE (exp
);
10318 rtx subtarget
, original_target
;
10321 bool reduce_bit_field
;
10322 location_t loc
= EXPR_LOCATION (exp
);
10323 struct separate_ops ops
;
10324 tree treeop0
, treeop1
, treeop2
;
10325 tree ssa_name
= NULL_TREE
;
10328 type
= TREE_TYPE (exp
);
10329 mode
= TYPE_MODE (type
);
10330 unsignedp
= TYPE_UNSIGNED (type
);
10332 treeop0
= treeop1
= treeop2
= NULL_TREE
;
10333 if (!VL_EXP_CLASS_P (exp
))
10334 switch (TREE_CODE_LENGTH (code
))
10337 case 3: treeop2
= TREE_OPERAND (exp
, 2); /* FALLTHRU */
10338 case 2: treeop1
= TREE_OPERAND (exp
, 1); /* FALLTHRU */
10339 case 1: treeop0
= TREE_OPERAND (exp
, 0); /* FALLTHRU */
10347 ops
.location
= loc
;
10349 ignore
= (target
== const0_rtx
10350 || ((CONVERT_EXPR_CODE_P (code
)
10351 || code
== COND_EXPR
|| code
== VIEW_CONVERT_EXPR
)
10352 && TREE_CODE (type
) == VOID_TYPE
));
10354 /* An operation in what may be a bit-field type needs the
10355 result to be reduced to the precision of the bit-field type,
10356 which is narrower than that of the type's mode. */
10357 reduce_bit_field
= (!ignore
10358 && INTEGRAL_TYPE_P (type
)
10359 && !type_has_mode_precision_p (type
));
10361 /* If we are going to ignore this result, we need only do something
10362 if there is a side-effect somewhere in the expression. If there
10363 is, short-circuit the most common cases here. Note that we must
10364 not call expand_expr with anything but const0_rtx in case this
10365 is an initial expansion of a size that contains a PLACEHOLDER_EXPR. */
10369 if (! TREE_SIDE_EFFECTS (exp
))
10372 /* Ensure we reference a volatile object even if value is ignored, but
10373 don't do this if all we are doing is taking its address. */
10374 if (TREE_THIS_VOLATILE (exp
)
10375 && TREE_CODE (exp
) != FUNCTION_DECL
10376 && mode
!= VOIDmode
&& mode
!= BLKmode
10377 && modifier
!= EXPAND_CONST_ADDRESS
)
10379 temp
= expand_expr (exp
, NULL_RTX
, VOIDmode
, modifier
);
10381 copy_to_reg (temp
);
10385 if (TREE_CODE_CLASS (code
) == tcc_unary
10386 || code
== BIT_FIELD_REF
10387 || code
== COMPONENT_REF
10388 || code
== INDIRECT_REF
)
10389 return expand_expr (treeop0
, const0_rtx
, VOIDmode
,
10392 else if (TREE_CODE_CLASS (code
) == tcc_binary
10393 || TREE_CODE_CLASS (code
) == tcc_comparison
10394 || code
== ARRAY_REF
|| code
== ARRAY_RANGE_REF
)
10396 expand_expr (treeop0
, const0_rtx
, VOIDmode
, modifier
);
10397 expand_expr (treeop1
, const0_rtx
, VOIDmode
, modifier
);
10404 if (reduce_bit_field
&& modifier
== EXPAND_STACK_PARM
)
10407 /* Use subtarget as the target for operand 0 of a binary operation. */
10408 subtarget
= get_subtarget (target
);
10409 original_target
= target
;
10415 tree function
= decl_function_context (exp
);
10417 temp
= label_rtx (exp
);
10418 temp
= gen_rtx_LABEL_REF (Pmode
, temp
);
10420 if (function
!= current_function_decl
10422 LABEL_REF_NONLOCAL_P (temp
) = 1;
10424 temp
= gen_rtx_MEM (FUNCTION_MODE
, temp
);
10429 /* ??? ivopts calls expander, without any preparation from
10430 out-of-ssa. So fake instructions as if this was an access to the
10431 base variable. This unnecessarily allocates a pseudo, see how we can
10432 reuse it, if partition base vars have it set already. */
10433 if (!currently_expanding_to_rtl
)
10435 tree var
= SSA_NAME_VAR (exp
);
10436 if (var
&& DECL_RTL_SET_P (var
))
10437 return DECL_RTL (var
);
10438 return gen_raw_REG (TYPE_MODE (TREE_TYPE (exp
)),
10439 LAST_VIRTUAL_REGISTER
+ 1);
10442 g
= get_gimple_for_ssa_name (exp
);
10443 /* For EXPAND_INITIALIZER try harder to get something simpler. */
10445 && modifier
== EXPAND_INITIALIZER
10446 && !SSA_NAME_IS_DEFAULT_DEF (exp
)
10447 && (optimize
|| !SSA_NAME_VAR (exp
)
10448 || DECL_IGNORED_P (SSA_NAME_VAR (exp
)))
10449 && stmt_is_replaceable_p (SSA_NAME_DEF_STMT (exp
)))
10450 g
= SSA_NAME_DEF_STMT (exp
);
10454 location_t saved_loc
= curr_insn_location ();
10455 loc
= gimple_location (g
);
10456 if (loc
!= UNKNOWN_LOCATION
)
10457 set_curr_insn_location (loc
);
10458 ops
.code
= gimple_assign_rhs_code (g
);
10459 switch (get_gimple_rhs_class (ops
.code
))
10461 case GIMPLE_TERNARY_RHS
:
10462 ops
.op2
= gimple_assign_rhs3 (g
);
10464 case GIMPLE_BINARY_RHS
:
10465 ops
.op1
= gimple_assign_rhs2 (g
);
10467 /* Try to expand conditonal compare. */
10468 if (targetm
.gen_ccmp_first
)
10470 gcc_checking_assert (targetm
.gen_ccmp_next
!= NULL
);
10471 r
= expand_ccmp_expr (g
, mode
);
10476 case GIMPLE_UNARY_RHS
:
10477 ops
.op0
= gimple_assign_rhs1 (g
);
10478 ops
.type
= TREE_TYPE (gimple_assign_lhs (g
));
10479 ops
.location
= loc
;
10480 r
= expand_expr_real_2 (&ops
, target
, tmode
, modifier
);
10482 case GIMPLE_SINGLE_RHS
:
10484 r
= expand_expr_real (gimple_assign_rhs1 (g
), target
,
10485 tmode
, modifier
, alt_rtl
,
10486 inner_reference_p
);
10490 gcc_unreachable ();
10492 set_curr_insn_location (saved_loc
);
10493 if (REG_P (r
) && !REG_EXPR (r
))
10494 set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (exp
), r
);
10499 decl_rtl
= get_rtx_for_ssa_name (ssa_name
);
10500 exp
= SSA_NAME_VAR (ssa_name
);
10501 goto expand_decl_rtl
;
10504 /* Allow accel compiler to handle variables that require special
10505 treatment, e.g. if they have been modified in some way earlier in
10506 compilation by the adjust_private_decl OpenACC hook. */
10507 if (flag_openacc
&& targetm
.goacc
.expand_var_decl
)
10509 temp
= targetm
.goacc
.expand_var_decl (exp
);
10513 /* ... fall through ... */
10516 /* If a static var's type was incomplete when the decl was written,
10517 but the type is complete now, lay out the decl now. */
10518 if (DECL_SIZE (exp
) == 0
10519 && COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (TREE_TYPE (exp
))
10520 && (TREE_STATIC (exp
) || DECL_EXTERNAL (exp
)))
10521 layout_decl (exp
, 0);
10525 case FUNCTION_DECL
:
10527 decl_rtl
= DECL_RTL (exp
);
10529 gcc_assert (decl_rtl
);
10531 /* DECL_MODE might change when TYPE_MODE depends on attribute target
10532 settings for VECTOR_TYPE_P that might switch for the function. */
10533 if (currently_expanding_to_rtl
10534 && code
== VAR_DECL
&& MEM_P (decl_rtl
)
10535 && VECTOR_TYPE_P (type
) && exp
&& DECL_MODE (exp
) != mode
)
10536 decl_rtl
= change_address (decl_rtl
, TYPE_MODE (type
), 0);
10538 decl_rtl
= copy_rtx (decl_rtl
);
10540 /* Record writes to register variables. */
10541 if (modifier
== EXPAND_WRITE
10542 && REG_P (decl_rtl
)
10543 && HARD_REGISTER_P (decl_rtl
))
10544 add_to_hard_reg_set (&crtl
->asm_clobbers
,
10545 GET_MODE (decl_rtl
), REGNO (decl_rtl
));
10547 /* Ensure variable marked as used even if it doesn't go through
10548 a parser. If it hasn't be used yet, write out an external
10551 TREE_USED (exp
) = 1;
10553 /* Show we haven't gotten RTL for this yet. */
10556 /* Variables inherited from containing functions should have
10557 been lowered by this point. */
10559 context
= decl_function_context (exp
);
10561 || SCOPE_FILE_SCOPE_P (context
)
10562 || context
== current_function_decl
10563 || TREE_STATIC (exp
)
10564 || DECL_EXTERNAL (exp
)
10565 /* ??? C++ creates functions that are not TREE_STATIC. */
10566 || TREE_CODE (exp
) == FUNCTION_DECL
);
10568 /* This is the case of an array whose size is to be determined
10569 from its initializer, while the initializer is still being parsed.
10570 ??? We aren't parsing while expanding anymore. */
10572 if (MEM_P (decl_rtl
) && REG_P (XEXP (decl_rtl
, 0)))
10573 temp
= validize_mem (decl_rtl
);
10575 /* If DECL_RTL is memory, we are in the normal case and the
10576 address is not valid, get the address into a register. */
10578 else if (MEM_P (decl_rtl
) && modifier
!= EXPAND_INITIALIZER
)
10581 *alt_rtl
= decl_rtl
;
10582 decl_rtl
= use_anchored_address (decl_rtl
);
10583 if (modifier
!= EXPAND_CONST_ADDRESS
10584 && modifier
!= EXPAND_SUM
10585 && !memory_address_addr_space_p (exp
? DECL_MODE (exp
)
10586 : GET_MODE (decl_rtl
),
10587 XEXP (decl_rtl
, 0),
10588 MEM_ADDR_SPACE (decl_rtl
)))
10589 temp
= replace_equiv_address (decl_rtl
,
10590 copy_rtx (XEXP (decl_rtl
, 0)));
10593 /* If we got something, return it. But first, set the alignment
10594 if the address is a register. */
10597 if (exp
&& MEM_P (temp
) && REG_P (XEXP (temp
, 0)))
10598 mark_reg_pointer (XEXP (temp
, 0), DECL_ALIGN (exp
));
10600 else if (MEM_P (decl_rtl
))
10606 && modifier
!= EXPAND_WRITE
10607 && modifier
!= EXPAND_MEMORY
10608 && modifier
!= EXPAND_INITIALIZER
10609 && modifier
!= EXPAND_CONST_ADDRESS
10610 && modifier
!= EXPAND_SUM
10611 && !inner_reference_p
10613 && MEM_ALIGN (temp
) < GET_MODE_ALIGNMENT (mode
))
10614 temp
= expand_misaligned_mem_ref (temp
, mode
, unsignedp
,
10615 MEM_ALIGN (temp
), NULL_RTX
, NULL
);
10621 dmode
= DECL_MODE (exp
);
10623 dmode
= TYPE_MODE (TREE_TYPE (ssa_name
));
10625 /* If the mode of DECL_RTL does not match that of the decl,
10626 there are two cases: we are dealing with a BLKmode value
10627 that is returned in a register, or we are dealing with
10628 a promoted value. In the latter case, return a SUBREG
10629 of the wanted mode, but mark it so that we know that it
10630 was already extended. */
10631 if (REG_P (decl_rtl
)
10632 && dmode
!= BLKmode
10633 && GET_MODE (decl_rtl
) != dmode
)
10635 machine_mode pmode
;
10637 /* Get the signedness to be used for this variable. Ensure we get
10638 the same mode we got when the variable was declared. */
10639 if (code
!= SSA_NAME
)
10640 pmode
= promote_decl_mode (exp
, &unsignedp
);
10641 else if ((g
= SSA_NAME_DEF_STMT (ssa_name
))
10642 && gimple_code (g
) == GIMPLE_CALL
10643 && !gimple_call_internal_p (g
))
10644 pmode
= promote_function_mode (type
, mode
, &unsignedp
,
10645 gimple_call_fntype (g
),
10648 pmode
= promote_ssa_mode (ssa_name
, &unsignedp
);
10649 gcc_assert (GET_MODE (decl_rtl
) == pmode
);
10651 temp
= gen_lowpart_SUBREG (mode
, decl_rtl
);
10652 SUBREG_PROMOTED_VAR_P (temp
) = 1;
10653 SUBREG_PROMOTED_SET (temp
, unsignedp
);
10661 /* Given that TYPE_PRECISION (type) is not always equal to
10662 GET_MODE_PRECISION (TYPE_MODE (type)), we need to extend from
10663 the former to the latter according to the signedness of the
10665 scalar_int_mode int_mode
= SCALAR_INT_TYPE_MODE (type
);
10666 temp
= immed_wide_int_const
10667 (wi::to_wide (exp
, GET_MODE_PRECISION (int_mode
)), int_mode
);
10673 tree tmp
= NULL_TREE
;
10674 if (VECTOR_MODE_P (mode
))
10675 return const_vector_from_tree (exp
);
10676 scalar_int_mode int_mode
;
10677 if (is_int_mode (mode
, &int_mode
))
10679 tree type_for_mode
= lang_hooks
.types
.type_for_mode (int_mode
, 1);
10681 tmp
= fold_unary_loc (loc
, VIEW_CONVERT_EXPR
,
10682 type_for_mode
, exp
);
10686 vec
<constructor_elt
, va_gc
> *v
;
10687 /* Constructors need to be fixed-length. FIXME. */
10688 unsigned int nunits
= VECTOR_CST_NELTS (exp
).to_constant ();
10689 vec_alloc (v
, nunits
);
10690 for (unsigned int i
= 0; i
< nunits
; ++i
)
10691 CONSTRUCTOR_APPEND_ELT (v
, NULL_TREE
, VECTOR_CST_ELT (exp
, i
));
10692 tmp
= build_constructor (type
, v
);
10694 return expand_expr (tmp
, ignore
? const0_rtx
: target
,
10699 if (modifier
== EXPAND_WRITE
)
10701 /* Writing into CONST_DECL is always invalid, but handle it
10703 addr_space_t as
= TYPE_ADDR_SPACE (TREE_TYPE (exp
));
10704 scalar_int_mode address_mode
= targetm
.addr_space
.address_mode (as
);
10705 op0
= expand_expr_addr_expr_1 (exp
, NULL_RTX
, address_mode
,
10706 EXPAND_NORMAL
, as
);
10707 op0
= memory_address_addr_space (mode
, op0
, as
);
10708 temp
= gen_rtx_MEM (mode
, op0
);
10709 set_mem_addr_space (temp
, as
);
10712 return expand_expr (DECL_INITIAL (exp
), target
, VOIDmode
, modifier
);
10715 /* If optimized, generate immediate CONST_DOUBLE
10716 which will be turned into memory by reload if necessary.
10718 We used to force a register so that loop.c could see it. But
10719 this does not allow gen_* patterns to perform optimizations with
10720 the constants. It also produces two insns in cases like "x = 1.0;".
10721 On most machines, floating-point constants are not permitted in
10722 many insns, so we'd end up copying it to a register in any case.
10724 Now, we do the copying in expand_binop, if appropriate. */
10725 return const_double_from_real_value (TREE_REAL_CST (exp
),
10726 TYPE_MODE (TREE_TYPE (exp
)));
10729 return CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (exp
),
10730 TYPE_MODE (TREE_TYPE (exp
)));
10733 /* Handle evaluating a complex constant in a CONCAT target. */
10734 if (original_target
&& GET_CODE (original_target
) == CONCAT
)
10738 mode
= TYPE_MODE (TREE_TYPE (TREE_TYPE (exp
)));
10739 rtarg
= XEXP (original_target
, 0);
10740 itarg
= XEXP (original_target
, 1);
10742 /* Move the real and imaginary parts separately. */
10743 op0
= expand_expr (TREE_REALPART (exp
), rtarg
, mode
, EXPAND_NORMAL
);
10744 op1
= expand_expr (TREE_IMAGPART (exp
), itarg
, mode
, EXPAND_NORMAL
);
10747 emit_move_insn (rtarg
, op0
);
10749 emit_move_insn (itarg
, op1
);
10751 return original_target
;
10757 temp
= expand_expr_constant (exp
, 1, modifier
);
10759 /* temp contains a constant address.
10760 On RISC machines where a constant address isn't valid,
10761 make some insns to get that address into a register. */
10762 if (modifier
!= EXPAND_CONST_ADDRESS
10763 && modifier
!= EXPAND_INITIALIZER
10764 && modifier
!= EXPAND_SUM
10765 && ! memory_address_addr_space_p (mode
, XEXP (temp
, 0),
10766 MEM_ADDR_SPACE (temp
)))
10767 return replace_equiv_address (temp
,
10768 copy_rtx (XEXP (temp
, 0)));
10772 return immed_wide_int_const (poly_int_cst_value (exp
), mode
);
10776 tree val
= treeop0
;
10777 rtx ret
= expand_expr_real_1 (val
, target
, tmode
, modifier
, alt_rtl
,
10778 inner_reference_p
);
10780 if (!SAVE_EXPR_RESOLVED_P (exp
))
10782 /* We can indeed still hit this case, typically via builtin
10783 expanders calling save_expr immediately before expanding
10784 something. Assume this means that we only have to deal
10785 with non-BLKmode values. */
10786 gcc_assert (GET_MODE (ret
) != BLKmode
);
10788 val
= build_decl (curr_insn_location (),
10789 VAR_DECL
, NULL
, TREE_TYPE (exp
));
10790 DECL_ARTIFICIAL (val
) = 1;
10791 DECL_IGNORED_P (val
) = 1;
10793 TREE_OPERAND (exp
, 0) = treeop0
;
10794 SAVE_EXPR_RESOLVED_P (exp
) = 1;
10796 if (!CONSTANT_P (ret
))
10797 ret
= copy_to_reg (ret
);
10798 SET_DECL_RTL (val
, ret
);
10806 /* If we don't need the result, just ensure we evaluate any
10810 unsigned HOST_WIDE_INT idx
;
10813 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp
), idx
, value
)
10814 expand_expr (value
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
10819 return expand_constructor (exp
, target
, modifier
, false);
10821 case TARGET_MEM_REF
:
10824 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp
, 0))));
10825 unsigned int align
;
10827 op0
= addr_for_mem_ref (exp
, as
, true);
10828 op0
= memory_address_addr_space (mode
, op0
, as
);
10829 temp
= gen_rtx_MEM (mode
, op0
);
10830 set_mem_attributes (temp
, exp
, 0);
10831 set_mem_addr_space (temp
, as
);
10832 align
= get_object_alignment (exp
);
10833 if (modifier
!= EXPAND_WRITE
10834 && modifier
!= EXPAND_MEMORY
10836 && align
< GET_MODE_ALIGNMENT (mode
))
10837 temp
= expand_misaligned_mem_ref (temp
, mode
, unsignedp
,
10838 align
, NULL_RTX
, NULL
);
10844 const bool reverse
= REF_REVERSE_STORAGE_ORDER (exp
);
10846 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp
, 0))));
10847 machine_mode address_mode
;
10848 tree base
= TREE_OPERAND (exp
, 0);
10851 /* Handle expansion of non-aliased memory with non-BLKmode. That
10852 might end up in a register. */
10853 if (mem_ref_refers_to_non_mem_p (exp
))
10855 poly_int64 offset
= mem_ref_offset (exp
).force_shwi ();
10856 base
= TREE_OPERAND (base
, 0);
10857 poly_uint64 type_size
;
10858 if (known_eq (offset
, 0)
10860 && poly_int_tree_p (TYPE_SIZE (type
), &type_size
)
10861 && known_eq (GET_MODE_BITSIZE (DECL_MODE (base
)), type_size
))
10862 return expand_expr (build1 (VIEW_CONVERT_EXPR
, type
, base
),
10863 target
, tmode
, modifier
);
10864 if (TYPE_MODE (type
) == BLKmode
)
10866 temp
= assign_stack_temp (DECL_MODE (base
),
10867 GET_MODE_SIZE (DECL_MODE (base
)));
10868 store_expr (base
, temp
, 0, false, false);
10869 temp
= adjust_address (temp
, BLKmode
, offset
);
10870 set_mem_size (temp
, int_size_in_bytes (type
));
10873 exp
= build3 (BIT_FIELD_REF
, type
, base
, TYPE_SIZE (type
),
10874 bitsize_int (offset
* BITS_PER_UNIT
));
10875 REF_REVERSE_STORAGE_ORDER (exp
) = reverse
;
10876 return expand_expr (exp
, target
, tmode
, modifier
);
10878 address_mode
= targetm
.addr_space
.address_mode (as
);
10879 if ((def_stmt
= get_def_for_expr (base
, BIT_AND_EXPR
)))
10881 tree mask
= gimple_assign_rhs2 (def_stmt
);
10882 base
= build2 (BIT_AND_EXPR
, TREE_TYPE (base
),
10883 gimple_assign_rhs1 (def_stmt
), mask
);
10884 TREE_OPERAND (exp
, 0) = base
;
10886 align
= get_object_alignment (exp
);
10887 op0
= expand_expr (base
, NULL_RTX
, VOIDmode
, EXPAND_SUM
);
10888 op0
= memory_address_addr_space (mode
, op0
, as
);
10889 if (!integer_zerop (TREE_OPERAND (exp
, 1)))
10891 rtx off
= immed_wide_int_const (mem_ref_offset (exp
), address_mode
);
10892 op0
= simplify_gen_binary (PLUS
, address_mode
, op0
, off
);
10893 op0
= memory_address_addr_space (mode
, op0
, as
);
10895 temp
= gen_rtx_MEM (mode
, op0
);
10896 set_mem_attributes (temp
, exp
, 0);
10897 set_mem_addr_space (temp
, as
);
10898 if (TREE_THIS_VOLATILE (exp
))
10899 MEM_VOLATILE_P (temp
) = 1;
10900 if (modifier
!= EXPAND_WRITE
10901 && modifier
!= EXPAND_MEMORY
10902 && !inner_reference_p
10904 && align
< GET_MODE_ALIGNMENT (mode
))
10905 temp
= expand_misaligned_mem_ref (temp
, mode
, unsignedp
, align
,
10906 modifier
== EXPAND_STACK_PARM
10907 ? NULL_RTX
: target
, alt_rtl
);
10909 && modifier
!= EXPAND_MEMORY
10910 && modifier
!= EXPAND_WRITE
)
10911 temp
= flip_storage_order (mode
, temp
);
10918 tree array
= treeop0
;
10919 tree index
= treeop1
;
10922 /* Fold an expression like: "foo"[2].
10923 This is not done in fold so it won't happen inside &.
10924 Don't fold if this is for wide characters since it's too
10925 difficult to do correctly and this is a very rare case. */
10927 if (modifier
!= EXPAND_CONST_ADDRESS
10928 && modifier
!= EXPAND_INITIALIZER
10929 && modifier
!= EXPAND_MEMORY
)
10931 tree t
= fold_read_from_constant_string (exp
);
10934 return expand_expr (t
, target
, tmode
, modifier
);
10937 /* If this is a constant index into a constant array,
10938 just get the value from the array. Handle both the cases when
10939 we have an explicit constructor and when our operand is a variable
10940 that was declared const. */
10942 if (modifier
!= EXPAND_CONST_ADDRESS
10943 && modifier
!= EXPAND_INITIALIZER
10944 && modifier
!= EXPAND_MEMORY
10945 && TREE_CODE (array
) == CONSTRUCTOR
10946 && ! TREE_SIDE_EFFECTS (array
)
10947 && TREE_CODE (index
) == INTEGER_CST
)
10949 unsigned HOST_WIDE_INT ix
;
10952 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (array
), ix
,
10954 if (tree_int_cst_equal (field
, index
))
10956 if (!TREE_SIDE_EFFECTS (value
))
10957 return expand_expr (fold (value
), target
, tmode
, modifier
);
10962 else if (optimize
>= 1
10963 && modifier
!= EXPAND_CONST_ADDRESS
10964 && modifier
!= EXPAND_INITIALIZER
10965 && modifier
!= EXPAND_MEMORY
10966 && TREE_READONLY (array
) && ! TREE_SIDE_EFFECTS (array
)
10967 && TREE_CODE (index
) == INTEGER_CST
10968 && (VAR_P (array
) || TREE_CODE (array
) == CONST_DECL
)
10969 && (init
= ctor_for_folding (array
)) != error_mark_node
)
10971 if (init
== NULL_TREE
)
10973 tree value
= build_zero_cst (type
);
10974 if (TREE_CODE (value
) == CONSTRUCTOR
)
10976 /* If VALUE is a CONSTRUCTOR, this optimization is only
10977 useful if this doesn't store the CONSTRUCTOR into
10978 memory. If it does, it is more efficient to just
10979 load the data from the array directly. */
10980 rtx ret
= expand_constructor (value
, target
,
10982 if (ret
== NULL_RTX
)
10987 return expand_expr (value
, target
, tmode
, modifier
);
10989 else if (TREE_CODE (init
) == CONSTRUCTOR
)
10991 unsigned HOST_WIDE_INT ix
;
10994 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init
), ix
,
10996 if (tree_int_cst_equal (field
, index
))
10998 if (TREE_SIDE_EFFECTS (value
))
11001 if (TREE_CODE (value
) == CONSTRUCTOR
)
11003 /* If VALUE is a CONSTRUCTOR, this
11004 optimization is only useful if
11005 this doesn't store the CONSTRUCTOR
11006 into memory. If it does, it is more
11007 efficient to just load the data from
11008 the array directly. */
11009 rtx ret
= expand_constructor (value
, target
,
11011 if (ret
== NULL_RTX
)
11016 expand_expr (fold (value
), target
, tmode
, modifier
);
11019 else if (TREE_CODE (init
) == STRING_CST
)
11021 tree low_bound
= array_ref_low_bound (exp
);
11022 tree index1
= fold_convert_loc (loc
, sizetype
, treeop1
);
11024 /* Optimize the special case of a zero lower bound.
11026 We convert the lower bound to sizetype to avoid problems
11027 with constant folding. E.g. suppose the lower bound is
11028 1 and its mode is QI. Without the conversion
11029 (ARRAY + (INDEX - (unsigned char)1))
11031 (ARRAY + (-(unsigned char)1) + INDEX)
11033 (ARRAY + 255 + INDEX). Oops! */
11034 if (!integer_zerop (low_bound
))
11035 index1
= size_diffop_loc (loc
, index1
,
11036 fold_convert_loc (loc
, sizetype
,
11039 if (tree_fits_uhwi_p (index1
)
11040 && compare_tree_int (index1
, TREE_STRING_LENGTH (init
)) < 0)
11042 tree char_type
= TREE_TYPE (TREE_TYPE (init
));
11043 scalar_int_mode char_mode
;
11045 if (is_int_mode (TYPE_MODE (char_type
), &char_mode
)
11046 && GET_MODE_SIZE (char_mode
) == 1)
11047 return gen_int_mode (TREE_STRING_POINTER (init
)
11048 [TREE_INT_CST_LOW (index1
)],
11054 goto normal_inner_ref
;
11056 case COMPONENT_REF
:
11057 gcc_assert (TREE_CODE (treeop0
) != CONSTRUCTOR
);
11058 /* Fall through. */
11059 case BIT_FIELD_REF
:
11060 case ARRAY_RANGE_REF
:
11063 machine_mode mode1
, mode2
;
11064 poly_int64 bitsize
, bitpos
, bytepos
;
11066 int reversep
, volatilep
= 0, must_force_mem
;
11068 = get_inner_reference (exp
, &bitsize
, &bitpos
, &offset
, &mode1
,
11069 &unsignedp
, &reversep
, &volatilep
);
11070 rtx orig_op0
, memloc
;
11071 bool clear_mem_expr
= false;
11073 /* If we got back the original object, something is wrong. Perhaps
11074 we are evaluating an expression too early. In any event, don't
11075 infinitely recurse. */
11076 gcc_assert (tem
!= exp
);
11078 /* If TEM's type is a union of variable size, pass TARGET to the inner
11079 computation, since it will need a temporary and TARGET is known
11080 to have to do. This occurs in unchecked conversion in Ada. */
11082 = expand_expr_real (tem
,
11083 (TREE_CODE (TREE_TYPE (tem
)) == UNION_TYPE
11084 && COMPLETE_TYPE_P (TREE_TYPE (tem
))
11085 && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem
)))
11087 && modifier
!= EXPAND_STACK_PARM
11088 ? target
: NULL_RTX
),
11090 modifier
== EXPAND_SUM
? EXPAND_NORMAL
: modifier
,
11093 /* If the field has a mode, we want to access it in the
11094 field's mode, not the computed mode.
11095 If a MEM has VOIDmode (external with incomplete type),
11096 use BLKmode for it instead. */
11099 if (mode1
!= VOIDmode
)
11100 op0
= adjust_address (op0
, mode1
, 0);
11101 else if (GET_MODE (op0
) == VOIDmode
)
11102 op0
= adjust_address (op0
, BLKmode
, 0);
11106 = CONSTANT_P (op0
) ? TYPE_MODE (TREE_TYPE (tem
)) : GET_MODE (op0
);
11108 /* Make sure bitpos is not negative, it can wreak havoc later. */
11109 if (maybe_lt (bitpos
, 0))
11111 gcc_checking_assert (offset
== NULL_TREE
);
11112 offset
= size_int (bits_to_bytes_round_down (bitpos
));
11113 bitpos
= num_trailing_bits (bitpos
);
11116 /* If we have either an offset, a BLKmode result, or a reference
11117 outside the underlying object, we must force it to memory.
11118 Such a case can occur in Ada if we have unchecked conversion
11119 of an expression from a scalar type to an aggregate type or
11120 for an ARRAY_RANGE_REF whose type is BLKmode, or if we were
11121 passed a partially uninitialized object or a view-conversion
11122 to a larger size. */
11123 must_force_mem
= (offset
11124 || mode1
== BLKmode
11125 || (mode
== BLKmode
11126 && !int_mode_for_size (bitsize
, 1).exists ())
11127 || maybe_gt (bitpos
+ bitsize
,
11128 GET_MODE_BITSIZE (mode2
)));
11130 /* Handle CONCAT first. */
11131 if (GET_CODE (op0
) == CONCAT
&& !must_force_mem
)
11133 if (known_eq (bitpos
, 0)
11134 && known_eq (bitsize
, GET_MODE_BITSIZE (GET_MODE (op0
)))
11135 && COMPLEX_MODE_P (mode1
)
11136 && COMPLEX_MODE_P (GET_MODE (op0
))
11137 && (GET_MODE_PRECISION (GET_MODE_INNER (mode1
))
11138 == GET_MODE_PRECISION (GET_MODE_INNER (GET_MODE (op0
)))))
11141 op0
= flip_storage_order (GET_MODE (op0
), op0
);
11142 if (mode1
!= GET_MODE (op0
))
11145 for (int i
= 0; i
< 2; i
++)
11147 rtx op
= read_complex_part (op0
, i
!= 0);
11148 if (GET_CODE (op
) == SUBREG
)
11149 op
= force_reg (GET_MODE (op
), op
);
11150 temp
= gen_lowpart_common (GET_MODE_INNER (mode1
), op
);
11155 if (!REG_P (op
) && !MEM_P (op
))
11156 op
= force_reg (GET_MODE (op
), op
);
11157 op
= gen_lowpart (GET_MODE_INNER (mode1
), op
);
11161 op0
= gen_rtx_CONCAT (mode1
, parts
[0], parts
[1]);
11165 if (known_eq (bitpos
, 0)
11166 && known_eq (bitsize
,
11167 GET_MODE_BITSIZE (GET_MODE (XEXP (op0
, 0))))
11168 && maybe_ne (bitsize
, 0))
11170 op0
= XEXP (op0
, 0);
11171 mode2
= GET_MODE (op0
);
11173 else if (known_eq (bitpos
,
11174 GET_MODE_BITSIZE (GET_MODE (XEXP (op0
, 0))))
11175 && known_eq (bitsize
,
11176 GET_MODE_BITSIZE (GET_MODE (XEXP (op0
, 1))))
11177 && maybe_ne (bitpos
, 0)
11178 && maybe_ne (bitsize
, 0))
11180 op0
= XEXP (op0
, 1);
11182 mode2
= GET_MODE (op0
);
11185 /* Otherwise force into memory. */
11186 must_force_mem
= 1;
11189 /* If this is a constant, put it in a register if it is a legitimate
11190 constant and we don't need a memory reference. */
11191 if (CONSTANT_P (op0
)
11192 && mode2
!= BLKmode
11193 && targetm
.legitimate_constant_p (mode2
, op0
)
11194 && !must_force_mem
)
11195 op0
= force_reg (mode2
, op0
);
11197 /* Otherwise, if this is a constant, try to force it to the constant
11198 pool. Note that back-ends, e.g. MIPS, may refuse to do so if it
11199 is a legitimate constant. */
11200 else if (CONSTANT_P (op0
) && (memloc
= force_const_mem (mode2
, op0
)))
11201 op0
= validize_mem (memloc
);
11203 /* Otherwise, if this is a constant or the object is not in memory
11204 and need be, put it there. */
11205 else if (CONSTANT_P (op0
) || (!MEM_P (op0
) && must_force_mem
))
11207 memloc
= assign_temp (TREE_TYPE (tem
), 1, 1);
11208 emit_move_insn (memloc
, op0
);
11210 clear_mem_expr
= true;
11215 machine_mode address_mode
;
11216 rtx offset_rtx
= expand_expr (offset
, NULL_RTX
, VOIDmode
,
11219 gcc_assert (MEM_P (op0
));
11221 address_mode
= get_address_mode (op0
);
11222 if (GET_MODE (offset_rtx
) != address_mode
)
11224 /* We cannot be sure that the RTL in offset_rtx is valid outside
11225 of a memory address context, so force it into a register
11226 before attempting to convert it to the desired mode. */
11227 offset_rtx
= force_operand (offset_rtx
, NULL_RTX
);
11228 offset_rtx
= convert_to_mode (address_mode
, offset_rtx
, 0);
11231 /* See the comment in expand_assignment for the rationale. */
11232 if (mode1
!= VOIDmode
11233 && maybe_ne (bitpos
, 0)
11234 && maybe_gt (bitsize
, 0)
11235 && multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
11236 && multiple_p (bitpos
, bitsize
)
11237 && multiple_p (bitsize
, GET_MODE_ALIGNMENT (mode1
))
11238 && MEM_ALIGN (op0
) >= GET_MODE_ALIGNMENT (mode1
))
11240 op0
= adjust_address (op0
, mode1
, bytepos
);
11244 op0
= offset_address (op0
, offset_rtx
,
11245 highest_pow2_factor (offset
));
11248 /* If OFFSET is making OP0 more aligned than BIGGEST_ALIGNMENT,
11249 record its alignment as BIGGEST_ALIGNMENT. */
11251 && known_eq (bitpos
, 0)
11253 && is_aligning_offset (offset
, tem
))
11254 set_mem_align (op0
, BIGGEST_ALIGNMENT
);
11256 /* Don't forget about volatility even if this is a bitfield. */
11257 if (MEM_P (op0
) && volatilep
&& ! MEM_VOLATILE_P (op0
))
11259 if (op0
== orig_op0
)
11260 op0
= copy_rtx (op0
);
11262 MEM_VOLATILE_P (op0
) = 1;
11265 if (MEM_P (op0
) && TREE_CODE (tem
) == FUNCTION_DECL
)
11267 if (op0
== orig_op0
)
11268 op0
= copy_rtx (op0
);
11270 set_mem_align (op0
, BITS_PER_UNIT
);
11273 /* In cases where an aligned union has an unaligned object
11274 as a field, we might be extracting a BLKmode value from
11275 an integer-mode (e.g., SImode) object. Handle this case
11276 by doing the extract into an object as wide as the field
11277 (which we know to be the width of a basic mode), then
11278 storing into memory, and changing the mode to BLKmode. */
11279 if (mode1
== VOIDmode
11280 || REG_P (op0
) || GET_CODE (op0
) == SUBREG
11281 || (mode1
!= BLKmode
&& ! direct_load
[(int) mode1
]
11282 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_INT
11283 && GET_MODE_CLASS (mode
) != MODE_COMPLEX_FLOAT
11284 && modifier
!= EXPAND_CONST_ADDRESS
11285 && modifier
!= EXPAND_INITIALIZER
11286 && modifier
!= EXPAND_MEMORY
)
11287 /* If the bitfield is volatile and the bitsize
11288 is narrower than the access size of the bitfield,
11289 we need to extract bitfields from the access. */
11290 || (volatilep
&& TREE_CODE (exp
) == COMPONENT_REF
11291 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (exp
, 1))
11292 && mode1
!= BLKmode
11293 && maybe_lt (bitsize
, GET_MODE_SIZE (mode1
) * BITS_PER_UNIT
))
11294 /* If the field isn't aligned enough to fetch as a memref,
11295 fetch it as a bit field. */
11296 || (mode1
!= BLKmode
11298 ? MEM_ALIGN (op0
) < GET_MODE_ALIGNMENT (mode1
)
11299 || !multiple_p (bitpos
, GET_MODE_ALIGNMENT (mode1
))
11300 : TYPE_ALIGN (TREE_TYPE (tem
)) < GET_MODE_ALIGNMENT (mode
)
11301 || !multiple_p (bitpos
, GET_MODE_ALIGNMENT (mode
)))
11302 && modifier
!= EXPAND_MEMORY
11303 && ((modifier
== EXPAND_CONST_ADDRESS
11304 || modifier
== EXPAND_INITIALIZER
)
11306 : targetm
.slow_unaligned_access (mode1
,
11308 || !multiple_p (bitpos
, BITS_PER_UNIT
)))
11309 /* If the type and the field are a constant size and the
11310 size of the type isn't the same size as the bitfield,
11311 we must use bitfield operations. */
11312 || (known_size_p (bitsize
)
11313 && TYPE_SIZE (TREE_TYPE (exp
))
11314 && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp
)))
11315 && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp
))),
11318 machine_mode ext_mode
= mode
;
11320 if (ext_mode
== BLKmode
11321 && ! (target
!= 0 && MEM_P (op0
)
11323 && multiple_p (bitpos
, BITS_PER_UNIT
)))
11324 ext_mode
= int_mode_for_size (bitsize
, 1).else_blk ();
11326 if (ext_mode
== BLKmode
)
11329 target
= assign_temp (type
, 1, 1);
11331 /* ??? Unlike the similar test a few lines below, this one is
11332 very likely obsolete. */
11333 if (known_eq (bitsize
, 0))
11336 /* In this case, BITPOS must start at a byte boundary and
11337 TARGET, if specified, must be a MEM. */
11338 gcc_assert (MEM_P (op0
)
11339 && (!target
|| MEM_P (target
)));
11341 bytepos
= exact_div (bitpos
, BITS_PER_UNIT
);
11342 poly_int64 bytesize
= bits_to_bytes_round_up (bitsize
);
11343 emit_block_move (target
,
11344 adjust_address (op0
, VOIDmode
, bytepos
),
11345 gen_int_mode (bytesize
, Pmode
),
11346 (modifier
== EXPAND_STACK_PARM
11347 ? BLOCK_OP_CALL_PARM
: BLOCK_OP_NORMAL
));
11352 /* If we have nothing to extract, the result will be 0 for targets
11353 with SHIFT_COUNT_TRUNCATED == 0 and garbage otherwise. Always
11354 return 0 for the sake of consistency, as reading a zero-sized
11355 bitfield is valid in Ada and the value is fully specified. */
11356 if (known_eq (bitsize
, 0))
11359 op0
= validize_mem (op0
);
11361 if (MEM_P (op0
) && REG_P (XEXP (op0
, 0)))
11362 mark_reg_pointer (XEXP (op0
, 0), MEM_ALIGN (op0
));
11364 /* If the result has aggregate type and the extraction is done in
11365 an integral mode, then the field may be not aligned on a byte
11366 boundary; in this case, if it has reverse storage order, it
11367 needs to be extracted as a scalar field with reverse storage
11368 order and put back into memory order afterwards. */
11369 if (AGGREGATE_TYPE_P (type
)
11370 && GET_MODE_CLASS (ext_mode
) == MODE_INT
)
11371 reversep
= TYPE_REVERSE_STORAGE_ORDER (type
);
11373 gcc_checking_assert (known_ge (bitpos
, 0));
11374 op0
= extract_bit_field (op0
, bitsize
, bitpos
, unsignedp
,
11375 (modifier
== EXPAND_STACK_PARM
11376 ? NULL_RTX
: target
),
11377 ext_mode
, ext_mode
, reversep
, alt_rtl
);
11379 /* If the result has aggregate type and the mode of OP0 is an
11380 integral mode then, if BITSIZE is narrower than this mode
11381 and this is for big-endian data, we must put the field
11382 into the high-order bits. And we must also put it back
11383 into memory order if it has been previously reversed. */
11384 scalar_int_mode op0_mode
;
11385 if (AGGREGATE_TYPE_P (type
)
11386 && is_int_mode (GET_MODE (op0
), &op0_mode
))
11388 HOST_WIDE_INT size
= GET_MODE_BITSIZE (op0_mode
);
11390 gcc_checking_assert (known_le (bitsize
, size
));
11391 if (maybe_lt (bitsize
, size
)
11392 && reversep
? !BYTES_BIG_ENDIAN
: BYTES_BIG_ENDIAN
)
11393 op0
= expand_shift (LSHIFT_EXPR
, op0_mode
, op0
,
11394 size
- bitsize
, op0
, 1);
11397 op0
= flip_storage_order (op0_mode
, op0
);
11400 /* If the result type is BLKmode, store the data into a temporary
11401 of the appropriate type, but with the mode corresponding to the
11402 mode for the data we have (op0's mode). */
11403 if (mode
== BLKmode
)
11406 = assign_stack_temp_for_type (ext_mode
,
11407 GET_MODE_BITSIZE (ext_mode
),
11409 emit_move_insn (new_rtx
, op0
);
11410 op0
= copy_rtx (new_rtx
);
11411 PUT_MODE (op0
, BLKmode
);
11417 /* If the result is BLKmode, use that to access the object
11419 if (mode
== BLKmode
)
11422 /* Get a reference to just this component. */
11423 bytepos
= bits_to_bytes_round_down (bitpos
);
11424 if (modifier
== EXPAND_CONST_ADDRESS
11425 || modifier
== EXPAND_SUM
|| modifier
== EXPAND_INITIALIZER
)
11426 op0
= adjust_address_nv (op0
, mode1
, bytepos
);
11428 op0
= adjust_address (op0
, mode1
, bytepos
);
11430 if (op0
== orig_op0
)
11431 op0
= copy_rtx (op0
);
11433 /* Don't set memory attributes if the base expression is
11434 SSA_NAME that got expanded as a MEM or a CONSTANT. In that case,
11435 we should just honor its original memory attributes. */
11436 if (!(TREE_CODE (tem
) == SSA_NAME
11437 && (MEM_P (orig_op0
) || CONSTANT_P (orig_op0
))))
11438 set_mem_attributes (op0
, exp
, 0);
11440 if (REG_P (XEXP (op0
, 0)))
11441 mark_reg_pointer (XEXP (op0
, 0), MEM_ALIGN (op0
));
11443 /* If op0 is a temporary because the original expressions was forced
11444 to memory, clear MEM_EXPR so that the original expression cannot
11445 be marked as addressable through MEM_EXPR of the temporary. */
11446 if (clear_mem_expr
)
11447 set_mem_expr (op0
, NULL_TREE
);
11449 MEM_VOLATILE_P (op0
) |= volatilep
;
11452 && modifier
!= EXPAND_MEMORY
11453 && modifier
!= EXPAND_WRITE
)
11454 op0
= flip_storage_order (mode1
, op0
);
11456 if (mode
== mode1
|| mode1
== BLKmode
|| mode1
== tmode
11457 || modifier
== EXPAND_CONST_ADDRESS
11458 || modifier
== EXPAND_INITIALIZER
)
11462 target
= gen_reg_rtx (tmode
!= VOIDmode
? tmode
: mode
);
11464 convert_move (target
, op0
, unsignedp
);
11469 return expand_expr (OBJ_TYPE_REF_EXPR (exp
), target
, tmode
, modifier
);
11472 /* All valid uses of __builtin_va_arg_pack () are removed during
11474 if (CALL_EXPR_VA_ARG_PACK (exp
))
11475 error ("invalid use of %<__builtin_va_arg_pack ()%>");
11477 tree fndecl
= get_callee_fndecl (exp
), attr
;
11480 /* Don't diagnose the error attribute in thunks, those are
11481 artificially created. */
11482 && !CALL_FROM_THUNK_P (exp
)
11483 && (attr
= lookup_attribute ("error",
11484 DECL_ATTRIBUTES (fndecl
))) != NULL
)
11486 const char *ident
= lang_hooks
.decl_printable_name (fndecl
, 1);
11487 error ("call to %qs declared with attribute error: %s",
11488 identifier_to_locale (ident
),
11489 TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr
))));
11492 /* Don't diagnose the warning attribute in thunks, those are
11493 artificially created. */
11494 && !CALL_FROM_THUNK_P (exp
)
11495 && (attr
= lookup_attribute ("warning",
11496 DECL_ATTRIBUTES (fndecl
))) != NULL
)
11498 const char *ident
= lang_hooks
.decl_printable_name (fndecl
, 1);
11499 warning_at (EXPR_LOCATION (exp
),
11500 OPT_Wattribute_warning
,
11501 "call to %qs declared with attribute warning: %s",
11502 identifier_to_locale (ident
),
11503 TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr
))));
11506 /* Check for a built-in function. */
11507 if (fndecl
&& fndecl_built_in_p (fndecl
))
11509 gcc_assert (DECL_BUILT_IN_CLASS (fndecl
) != BUILT_IN_FRONTEND
);
11510 return expand_builtin (exp
, target
, subtarget
, tmode
, ignore
);
11513 return expand_call (exp
, target
, ignore
);
11515 case VIEW_CONVERT_EXPR
:
11518 /* If we are converting to BLKmode, try to avoid an intermediate
11519 temporary by fetching an inner memory reference. */
11520 if (mode
== BLKmode
11521 && poly_int_tree_p (TYPE_SIZE (type
))
11522 && TYPE_MODE (TREE_TYPE (treeop0
)) != BLKmode
11523 && handled_component_p (treeop0
))
11525 machine_mode mode1
;
11526 poly_int64 bitsize
, bitpos
, bytepos
;
11528 int reversep
, volatilep
= 0;
11530 = get_inner_reference (treeop0
, &bitsize
, &bitpos
, &offset
, &mode1
,
11531 &unsignedp
, &reversep
, &volatilep
);
11533 /* ??? We should work harder and deal with non-zero offsets. */
11535 && multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
11537 && known_size_p (bitsize
)
11538 && known_eq (wi::to_poly_offset (TYPE_SIZE (type
)), bitsize
))
11540 /* See the normal_inner_ref case for the rationale. */
11542 = expand_expr_real (tem
,
11543 (TREE_CODE (TREE_TYPE (tem
)) == UNION_TYPE
11544 && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem
)))
11546 && modifier
!= EXPAND_STACK_PARM
11547 ? target
: NULL_RTX
),
11549 modifier
== EXPAND_SUM
? EXPAND_NORMAL
: modifier
,
11552 if (MEM_P (orig_op0
))
11556 /* Get a reference to just this component. */
11557 if (modifier
== EXPAND_CONST_ADDRESS
11558 || modifier
== EXPAND_SUM
11559 || modifier
== EXPAND_INITIALIZER
)
11560 op0
= adjust_address_nv (op0
, mode
, bytepos
);
11562 op0
= adjust_address (op0
, mode
, bytepos
);
11564 if (op0
== orig_op0
)
11565 op0
= copy_rtx (op0
);
11567 set_mem_attributes (op0
, treeop0
, 0);
11568 if (REG_P (XEXP (op0
, 0)))
11569 mark_reg_pointer (XEXP (op0
, 0), MEM_ALIGN (op0
));
11571 MEM_VOLATILE_P (op0
) |= volatilep
;
11577 op0
= expand_expr_real (treeop0
, NULL_RTX
, VOIDmode
, modifier
,
11578 NULL
, inner_reference_p
);
11580 /* If the input and output modes are both the same, we are done. */
11581 if (mode
== GET_MODE (op0
))
11583 /* If neither mode is BLKmode, and both modes are the same size
11584 then we can use gen_lowpart. */
11585 else if (mode
!= BLKmode
11586 && GET_MODE (op0
) != BLKmode
11587 && known_eq (GET_MODE_PRECISION (mode
),
11588 GET_MODE_PRECISION (GET_MODE (op0
)))
11589 && !COMPLEX_MODE_P (GET_MODE (op0
)))
11591 if (GET_CODE (op0
) == SUBREG
)
11592 op0
= force_reg (GET_MODE (op0
), op0
);
11593 temp
= gen_lowpart_common (mode
, op0
);
11598 if (!REG_P (op0
) && !MEM_P (op0
))
11599 op0
= force_reg (GET_MODE (op0
), op0
);
11600 op0
= gen_lowpart (mode
, op0
);
11603 /* If both types are integral, convert from one mode to the other. */
11604 else if (INTEGRAL_TYPE_P (type
) && INTEGRAL_TYPE_P (TREE_TYPE (treeop0
)))
11605 op0
= convert_modes (mode
, GET_MODE (op0
), op0
,
11606 TYPE_UNSIGNED (TREE_TYPE (treeop0
)));
11607 /* If the output type is a bit-field type, do an extraction. */
11608 else if (reduce_bit_field
)
11609 return extract_bit_field (op0
, TYPE_PRECISION (type
), 0,
11610 TYPE_UNSIGNED (type
), NULL_RTX
,
11611 mode
, mode
, false, NULL
);
11612 /* As a last resort, spill op0 to memory, and reload it in a
11614 else if (!MEM_P (op0
))
11616 /* If the operand is not a MEM, force it into memory. Since we
11617 are going to be changing the mode of the MEM, don't call
11618 force_const_mem for constants because we don't allow pool
11619 constants to change mode. */
11620 tree inner_type
= TREE_TYPE (treeop0
);
11622 gcc_assert (!TREE_ADDRESSABLE (exp
));
11624 if (target
== 0 || GET_MODE (target
) != TYPE_MODE (inner_type
))
11626 = assign_stack_temp_for_type
11627 (TYPE_MODE (inner_type
),
11628 GET_MODE_SIZE (TYPE_MODE (inner_type
)), inner_type
);
11630 emit_move_insn (target
, op0
);
11634 /* If OP0 is (now) a MEM, we need to deal with alignment issues. If the
11635 output type is such that the operand is known to be aligned, indicate
11636 that it is. Otherwise, we need only be concerned about alignment for
11637 non-BLKmode results. */
11640 enum insn_code icode
;
11642 if (modifier
!= EXPAND_WRITE
11643 && modifier
!= EXPAND_MEMORY
11644 && !inner_reference_p
11646 && MEM_ALIGN (op0
) < GET_MODE_ALIGNMENT (mode
))
11648 /* If the target does have special handling for unaligned
11649 loads of mode then use them. */
11650 if ((icode
= optab_handler (movmisalign_optab
, mode
))
11651 != CODE_FOR_nothing
)
11655 op0
= adjust_address (op0
, mode
, 0);
11656 /* We've already validated the memory, and we're creating a
11657 new pseudo destination. The predicates really can't
11659 reg
= gen_reg_rtx (mode
);
11661 /* Nor can the insn generator. */
11662 rtx_insn
*insn
= GEN_FCN (icode
) (reg
, op0
);
11666 else if (STRICT_ALIGNMENT
)
11668 poly_uint64 mode_size
= GET_MODE_SIZE (mode
);
11669 poly_uint64 temp_size
= mode_size
;
11670 if (GET_MODE (op0
) != BLKmode
)
11671 temp_size
= upper_bound (temp_size
,
11672 GET_MODE_SIZE (GET_MODE (op0
)));
11674 = assign_stack_temp_for_type (mode
, temp_size
, type
);
11675 rtx new_with_op0_mode
11676 = adjust_address (new_rtx
, GET_MODE (op0
), 0);
11678 gcc_assert (!TREE_ADDRESSABLE (exp
));
11680 if (GET_MODE (op0
) == BLKmode
)
11682 rtx size_rtx
= gen_int_mode (mode_size
, Pmode
);
11683 emit_block_move (new_with_op0_mode
, op0
, size_rtx
,
11684 (modifier
== EXPAND_STACK_PARM
11685 ? BLOCK_OP_CALL_PARM
11686 : BLOCK_OP_NORMAL
));
11689 emit_move_insn (new_with_op0_mode
, op0
);
11695 op0
= adjust_address (op0
, mode
, 0);
11702 tree lhs
= treeop0
;
11703 tree rhs
= treeop1
;
11704 gcc_assert (ignore
);
11706 /* Check for |= or &= of a bitfield of size one into another bitfield
11707 of size 1. In this case, (unless we need the result of the
11708 assignment) we can do this more efficiently with a
11709 test followed by an assignment, if necessary.
11711 ??? At this point, we can't get a BIT_FIELD_REF here. But if
11712 things change so we do, this code should be enhanced to
11714 if (TREE_CODE (lhs
) == COMPONENT_REF
11715 && (TREE_CODE (rhs
) == BIT_IOR_EXPR
11716 || TREE_CODE (rhs
) == BIT_AND_EXPR
)
11717 && TREE_OPERAND (rhs
, 0) == lhs
11718 && TREE_CODE (TREE_OPERAND (rhs
, 1)) == COMPONENT_REF
11719 && integer_onep (DECL_SIZE (TREE_OPERAND (lhs
, 1)))
11720 && integer_onep (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (rhs
, 1), 1))))
11722 rtx_code_label
*label
= gen_label_rtx ();
11723 int value
= TREE_CODE (rhs
) == BIT_IOR_EXPR
;
11724 profile_probability prob
= profile_probability::uninitialized ();
11726 jumpifnot (TREE_OPERAND (rhs
, 1), label
, prob
);
11728 jumpif (TREE_OPERAND (rhs
, 1), label
, prob
);
11729 expand_assignment (lhs
, build_int_cst (TREE_TYPE (rhs
), value
),
11731 do_pending_stack_adjust ();
11732 emit_label (label
);
11736 expand_assignment (lhs
, rhs
, false);
11741 return expand_expr_addr_expr (exp
, target
, tmode
, modifier
);
11743 case REALPART_EXPR
:
11744 op0
= expand_normal (treeop0
);
11745 return read_complex_part (op0
, false);
11747 case IMAGPART_EXPR
:
11748 op0
= expand_normal (treeop0
);
11749 return read_complex_part (op0
, true);
11756 /* Expanded in cfgexpand.c. */
11757 gcc_unreachable ();
11759 case TRY_CATCH_EXPR
:
11761 case EH_FILTER_EXPR
:
11762 case TRY_FINALLY_EXPR
:
11764 /* Lowered by tree-eh.c. */
11765 gcc_unreachable ();
11767 case WITH_CLEANUP_EXPR
:
11768 case CLEANUP_POINT_EXPR
:
11770 case CASE_LABEL_EXPR
:
11775 case COMPOUND_EXPR
:
11776 case PREINCREMENT_EXPR
:
11777 case PREDECREMENT_EXPR
:
11778 case POSTINCREMENT_EXPR
:
11779 case POSTDECREMENT_EXPR
:
11782 case COMPOUND_LITERAL_EXPR
:
11783 /* Lowered by gimplify.c. */
11784 gcc_unreachable ();
11787 /* Function descriptors are not valid except for as
11788 initialization constants, and should not be expanded. */
11789 gcc_unreachable ();
11791 case WITH_SIZE_EXPR
:
11792 /* WITH_SIZE_EXPR expands to its first argument. The caller should
11793 have pulled out the size to use in whatever context it needed. */
11794 return expand_expr_real (treeop0
, original_target
, tmode
,
11795 modifier
, alt_rtl
, inner_reference_p
);
11798 return expand_expr_real_2 (&ops
, target
, tmode
, modifier
);
11802 /* Subroutine of above: reduce EXP to the precision of TYPE (in the
11803 signedness of TYPE), possibly returning the result in TARGET.
11804 TYPE is known to be a partial integer type. */
11806 reduce_to_bit_field_precision (rtx exp
, rtx target
, tree type
)
11808 scalar_int_mode mode
= SCALAR_INT_TYPE_MODE (type
);
11809 HOST_WIDE_INT prec
= TYPE_PRECISION (type
);
11810 gcc_assert ((GET_MODE (exp
) == VOIDmode
|| GET_MODE (exp
) == mode
)
11811 && (!target
|| GET_MODE (target
) == mode
));
11813 /* For constant values, reduce using wide_int_to_tree. */
11814 if (poly_int_rtx_p (exp
))
11816 auto value
= wi::to_poly_wide (exp
, mode
);
11817 tree t
= wide_int_to_tree (type
, value
);
11818 return expand_expr (t
, target
, VOIDmode
, EXPAND_NORMAL
);
11820 else if (TYPE_UNSIGNED (type
))
11822 rtx mask
= immed_wide_int_const
11823 (wi::mask (prec
, false, GET_MODE_PRECISION (mode
)), mode
);
11824 return expand_and (mode
, exp
, mask
, target
);
11828 int count
= GET_MODE_PRECISION (mode
) - prec
;
11829 exp
= expand_shift (LSHIFT_EXPR
, mode
, exp
, count
, target
, 0);
11830 return expand_shift (RSHIFT_EXPR
, mode
, exp
, count
, target
, 0);
11834 /* Subroutine of above: returns 1 if OFFSET corresponds to an offset that
11835 when applied to the address of EXP produces an address known to be
11836 aligned more than BIGGEST_ALIGNMENT. */
11839 is_aligning_offset (const_tree offset
, const_tree exp
)
11841 /* Strip off any conversions. */
11842 while (CONVERT_EXPR_P (offset
))
11843 offset
= TREE_OPERAND (offset
, 0);
11845 /* We must now have a BIT_AND_EXPR with a constant that is one less than
11846 power of 2 and which is larger than BIGGEST_ALIGNMENT. */
11847 if (TREE_CODE (offset
) != BIT_AND_EXPR
11848 || !tree_fits_uhwi_p (TREE_OPERAND (offset
, 1))
11849 || compare_tree_int (TREE_OPERAND (offset
, 1),
11850 BIGGEST_ALIGNMENT
/ BITS_PER_UNIT
) <= 0
11851 || !pow2p_hwi (tree_to_uhwi (TREE_OPERAND (offset
, 1)) + 1))
11854 /* Look at the first operand of BIT_AND_EXPR and strip any conversion.
11855 It must be NEGATE_EXPR. Then strip any more conversions. */
11856 offset
= TREE_OPERAND (offset
, 0);
11857 while (CONVERT_EXPR_P (offset
))
11858 offset
= TREE_OPERAND (offset
, 0);
11860 if (TREE_CODE (offset
) != NEGATE_EXPR
)
11863 offset
= TREE_OPERAND (offset
, 0);
11864 while (CONVERT_EXPR_P (offset
))
11865 offset
= TREE_OPERAND (offset
, 0);
11867 /* This must now be the address of EXP. */
11868 return TREE_CODE (offset
) == ADDR_EXPR
&& TREE_OPERAND (offset
, 0) == exp
;
11871 /* Return a STRING_CST corresponding to ARG's constant initializer either
11872 if it's a string constant, or, when VALREP is set, any other constant,
11874 On success, set *PTR_OFFSET to the (possibly non-constant) byte offset
11875 within the byte string that ARG is references. If nonnull set *MEM_SIZE
11876 to the size of the byte string. If nonnull, set *DECL to the constant
11877 declaration ARG refers to. */
11880 constant_byte_string (tree arg
, tree
*ptr_offset
, tree
*mem_size
, tree
*decl
,
11881 bool valrep
= false)
11883 tree dummy
= NULL_TREE
;
11887 /* Store the type of the original expression before conversions
11888 via NOP_EXPR or POINTER_PLUS_EXPR to other types have been
11890 tree argtype
= TREE_TYPE (arg
);
11895 /* Non-constant index into the character array in an ARRAY_REF
11896 expression or null. */
11897 tree varidx
= NULL_TREE
;
11899 poly_int64 base_off
= 0;
11901 if (TREE_CODE (arg
) == ADDR_EXPR
)
11903 arg
= TREE_OPERAND (arg
, 0);
11905 if (TREE_CODE (arg
) == ARRAY_REF
)
11907 tree idx
= TREE_OPERAND (arg
, 1);
11908 if (TREE_CODE (idx
) != INTEGER_CST
)
11910 /* From a pointer (but not array) argument extract the variable
11911 index to prevent get_addr_base_and_unit_offset() from failing
11912 due to it. Use it later to compute the non-constant offset
11913 into the string and return it to the caller. */
11915 ref
= TREE_OPERAND (arg
, 0);
11917 if (TREE_CODE (TREE_TYPE (arg
)) == ARRAY_TYPE
)
11920 if (!integer_zerop (array_ref_low_bound (arg
)))
11923 if (!integer_onep (array_ref_element_size (arg
)))
11927 array
= get_addr_base_and_unit_offset (ref
, &base_off
);
11929 || (TREE_CODE (array
) != VAR_DECL
11930 && TREE_CODE (array
) != CONST_DECL
11931 && TREE_CODE (array
) != STRING_CST
))
11934 else if (TREE_CODE (arg
) == PLUS_EXPR
|| TREE_CODE (arg
) == POINTER_PLUS_EXPR
)
11936 tree arg0
= TREE_OPERAND (arg
, 0);
11937 tree arg1
= TREE_OPERAND (arg
, 1);
11940 tree str
= string_constant (arg0
, &offset
, mem_size
, decl
);
11943 str
= string_constant (arg1
, &offset
, mem_size
, decl
);
11949 /* Avoid pointers to arrays (see bug 86622). */
11950 if (POINTER_TYPE_P (TREE_TYPE (arg
))
11951 && TREE_CODE (TREE_TYPE (TREE_TYPE (arg
))) == ARRAY_TYPE
11952 && !(decl
&& !*decl
)
11953 && !(decl
&& tree_fits_uhwi_p (DECL_SIZE_UNIT (*decl
))
11954 && tree_fits_uhwi_p (*mem_size
)
11955 && tree_int_cst_equal (*mem_size
, DECL_SIZE_UNIT (*decl
))))
11958 tree type
= TREE_TYPE (offset
);
11959 arg1
= fold_convert (type
, arg1
);
11960 *ptr_offset
= fold_build2 (PLUS_EXPR
, type
, offset
, arg1
);
11965 else if (TREE_CODE (arg
) == SSA_NAME
)
11967 gimple
*stmt
= SSA_NAME_DEF_STMT (arg
);
11968 if (!is_gimple_assign (stmt
))
11971 tree rhs1
= gimple_assign_rhs1 (stmt
);
11972 tree_code code
= gimple_assign_rhs_code (stmt
);
11973 if (code
== ADDR_EXPR
)
11974 return string_constant (rhs1
, ptr_offset
, mem_size
, decl
);
11975 else if (code
!= POINTER_PLUS_EXPR
)
11979 if (tree str
= string_constant (rhs1
, &offset
, mem_size
, decl
))
11981 /* Avoid pointers to arrays (see bug 86622). */
11982 if (POINTER_TYPE_P (TREE_TYPE (rhs1
))
11983 && TREE_CODE (TREE_TYPE (TREE_TYPE (rhs1
))) == ARRAY_TYPE
11984 && !(decl
&& !*decl
)
11985 && !(decl
&& tree_fits_uhwi_p (DECL_SIZE_UNIT (*decl
))
11986 && tree_fits_uhwi_p (*mem_size
)
11987 && tree_int_cst_equal (*mem_size
, DECL_SIZE_UNIT (*decl
))))
11990 tree rhs2
= gimple_assign_rhs2 (stmt
);
11991 tree type
= TREE_TYPE (offset
);
11992 rhs2
= fold_convert (type
, rhs2
);
11993 *ptr_offset
= fold_build2 (PLUS_EXPR
, type
, offset
, rhs2
);
11998 else if (DECL_P (arg
))
12003 tree offset
= wide_int_to_tree (sizetype
, base_off
);
12006 if (TREE_CODE (TREE_TYPE (array
)) != ARRAY_TYPE
)
12009 gcc_assert (TREE_CODE (arg
) == ARRAY_REF
);
12010 tree chartype
= TREE_TYPE (TREE_TYPE (TREE_OPERAND (arg
, 0)));
12011 if (TREE_CODE (chartype
) != INTEGER_TYPE
)
12014 offset
= fold_convert (sizetype
, varidx
);
12017 if (TREE_CODE (array
) == STRING_CST
)
12019 *ptr_offset
= fold_convert (sizetype
, offset
);
12020 *mem_size
= TYPE_SIZE_UNIT (TREE_TYPE (array
));
12023 gcc_checking_assert (tree_to_shwi (TYPE_SIZE_UNIT (TREE_TYPE (array
)))
12024 >= TREE_STRING_LENGTH (array
));
12028 tree init
= ctor_for_folding (array
);
12029 if (!init
|| init
== error_mark_node
)
12034 HOST_WIDE_INT cstoff
;
12035 if (!base_off
.is_constant (&cstoff
))
12038 /* Check that the host and target are sane. */
12039 if (CHAR_BIT
!= 8 || BITS_PER_UNIT
!= 8)
12042 HOST_WIDE_INT typesz
= int_size_in_bytes (TREE_TYPE (init
));
12043 if (typesz
<= 0 || (int) typesz
!= typesz
)
12046 HOST_WIDE_INT size
= typesz
;
12048 && DECL_SIZE_UNIT (array
)
12049 && tree_fits_shwi_p (DECL_SIZE_UNIT (array
)))
12051 size
= tree_to_shwi (DECL_SIZE_UNIT (array
));
12052 gcc_checking_assert (size
>= typesz
);
12055 /* If value representation was requested convert the initializer
12056 for the whole array or object into a string of bytes forming
12057 its value representation and return it. */
12058 unsigned char *bytes
= XNEWVEC (unsigned char, size
);
12059 int r
= native_encode_initializer (init
, bytes
, size
);
12062 XDELETEVEC (bytes
);
12067 memset (bytes
+ r
, '\0', size
- r
);
12069 const char *p
= reinterpret_cast<const char *>(bytes
);
12070 init
= build_string_literal (size
, p
, char_type_node
);
12071 init
= TREE_OPERAND (init
, 0);
12072 init
= TREE_OPERAND (init
, 0);
12075 *mem_size
= size_int (TREE_STRING_LENGTH (init
));
12076 *ptr_offset
= wide_int_to_tree (ssizetype
, base_off
);
12084 if (TREE_CODE (init
) == CONSTRUCTOR
)
12086 /* Convert the 64-bit constant offset to a wider type to avoid
12087 overflow and use it to obtain the initializer for the subobject
12090 if (!base_off
.is_constant (&wioff
))
12093 wioff
*= BITS_PER_UNIT
;
12094 if (!wi::fits_uhwi_p (wioff
))
12097 base_off
= wioff
.to_uhwi ();
12098 unsigned HOST_WIDE_INT fieldoff
= 0;
12099 init
= fold_ctor_reference (TREE_TYPE (arg
), init
, base_off
, 0, array
,
12101 if (!init
|| init
== error_mark_node
)
12104 HOST_WIDE_INT cstoff
;
12105 if (!base_off
.is_constant (&cstoff
))
12108 cstoff
= (cstoff
- fieldoff
) / BITS_PER_UNIT
;
12109 tree off
= build_int_cst (sizetype
, cstoff
);
12111 offset
= fold_build2 (PLUS_EXPR
, TREE_TYPE (offset
), offset
, off
);
12116 *ptr_offset
= offset
;
12118 tree inittype
= TREE_TYPE (init
);
12120 if (TREE_CODE (init
) == INTEGER_CST
12121 && (TREE_CODE (TREE_TYPE (array
)) == INTEGER_TYPE
12122 || TYPE_MAIN_VARIANT (inittype
) == char_type_node
))
12124 /* Check that the host and target are sane. */
12125 if (CHAR_BIT
!= 8 || BITS_PER_UNIT
!= 8)
12128 /* For a reference to (address of) a single constant character,
12129 store the native representation of the character in CHARBUF.
12130 If the reference is to an element of an array or a member
12131 of a struct, only consider narrow characters until ctors
12132 for wide character arrays are transformed to STRING_CSTs
12133 like those for narrow arrays. */
12134 unsigned char charbuf
[MAX_BITSIZE_MODE_ANY_MODE
/ BITS_PER_UNIT
];
12135 int len
= native_encode_expr (init
, charbuf
, sizeof charbuf
, 0);
12138 /* Construct a string literal with elements of INITTYPE and
12139 the representation above. Then strip
12140 the ADDR_EXPR (ARRAY_REF (...)) around the STRING_CST. */
12141 init
= build_string_literal (len
, (char *)charbuf
, inittype
);
12142 init
= TREE_OPERAND (TREE_OPERAND (init
, 0), 0);
12146 tree initsize
= TYPE_SIZE_UNIT (inittype
);
12148 if (TREE_CODE (init
) == CONSTRUCTOR
&& initializer_zerop (init
))
12150 /* Fold an empty/zero constructor for an implicitly initialized
12151 object or subobject into the empty string. */
12153 /* Determine the character type from that of the original
12155 tree chartype
= argtype
;
12156 if (POINTER_TYPE_P (chartype
))
12157 chartype
= TREE_TYPE (chartype
);
12158 while (TREE_CODE (chartype
) == ARRAY_TYPE
)
12159 chartype
= TREE_TYPE (chartype
);
12161 if (INTEGRAL_TYPE_P (chartype
)
12162 && TYPE_PRECISION (chartype
) == TYPE_PRECISION (char_type_node
))
12164 /* Convert a char array to an empty STRING_CST having an array
12165 of the expected type and size. */
12167 initsize
= integer_zero_node
;
12169 unsigned HOST_WIDE_INT size
= tree_to_uhwi (initsize
);
12170 if (size
> (unsigned HOST_WIDE_INT
) INT_MAX
)
12173 init
= build_string_literal (size
, NULL
, chartype
, size
);
12174 init
= TREE_OPERAND (init
, 0);
12175 init
= TREE_OPERAND (init
, 0);
12177 *ptr_offset
= integer_zero_node
;
12184 if (TREE_CODE (init
) != STRING_CST
)
12187 *mem_size
= initsize
;
12189 gcc_checking_assert (tree_to_shwi (initsize
) >= TREE_STRING_LENGTH (init
));
12194 /* Return STRING_CST if an ARG corresponds to a string constant or zero
12195 if it doesn't. If we return nonzero, set *PTR_OFFSET to the (possibly
12196 non-constant) offset in bytes within the string that ARG is accessing.
12197 If MEM_SIZE is non-zero the storage size of the memory is returned.
12198 If DECL is non-zero the constant declaration is returned if available. */
12201 string_constant (tree arg
, tree
*ptr_offset
, tree
*mem_size
, tree
*decl
)
12203 return constant_byte_string (arg
, ptr_offset
, mem_size
, decl
, false);
12206 /* Similar to string_constant, return a STRING_CST corresponding
12207 to the value representation of the first argument if it's
12211 byte_representation (tree arg
, tree
*ptr_offset
, tree
*mem_size
, tree
*decl
)
12213 return constant_byte_string (arg
, ptr_offset
, mem_size
, decl
, true);
12216 /* Optimize x % C1 == C2 for signed modulo if C1 is a power of two and C2
12217 is non-zero and C3 ((1<<(prec-1)) | (C1 - 1)):
12218 for C2 > 0 to x & C3 == C2
12219 for C2 < 0 to x & C3 == (C2 & C3). */
12221 maybe_optimize_pow2p_mod_cmp (enum tree_code code
, tree
*arg0
, tree
*arg1
)
12223 gimple
*stmt
= get_def_for_expr (*arg0
, TRUNC_MOD_EXPR
);
12224 tree treeop0
= gimple_assign_rhs1 (stmt
);
12225 tree treeop1
= gimple_assign_rhs2 (stmt
);
12226 tree type
= TREE_TYPE (*arg0
);
12227 scalar_int_mode mode
;
12228 if (!is_a
<scalar_int_mode
> (TYPE_MODE (type
), &mode
))
12230 if (GET_MODE_BITSIZE (mode
) != TYPE_PRECISION (type
)
12231 || TYPE_PRECISION (type
) <= 1
12232 || TYPE_UNSIGNED (type
)
12233 /* Signed x % c == 0 should have been optimized into unsigned modulo
12235 || integer_zerop (*arg1
)
12236 /* If c is known to be non-negative, modulo will be expanded as unsigned
12238 || get_range_pos_neg (treeop0
) == 1)
12241 /* x % c == d where d < 0 && d <= -c should be always false. */
12242 if (tree_int_cst_sgn (*arg1
) == -1
12243 && -wi::to_widest (treeop1
) >= wi::to_widest (*arg1
))
12246 int prec
= TYPE_PRECISION (type
);
12247 wide_int w
= wi::to_wide (treeop1
) - 1;
12248 w
|= wi::shifted_mask (0, prec
- 1, true, prec
);
12249 tree c3
= wide_int_to_tree (type
, w
);
12251 if (tree_int_cst_sgn (*arg1
) == -1)
12252 c4
= wide_int_to_tree (type
, w
& wi::to_wide (*arg1
));
12254 rtx op0
= expand_normal (treeop0
);
12255 treeop0
= make_tree (TREE_TYPE (treeop0
), op0
);
12257 bool speed_p
= optimize_insn_for_speed_p ();
12259 do_pending_stack_adjust ();
12261 location_t loc
= gimple_location (stmt
);
12262 struct separate_ops ops
;
12263 ops
.code
= TRUNC_MOD_EXPR
;
12264 ops
.location
= loc
;
12265 ops
.type
= TREE_TYPE (treeop0
);
12268 ops
.op2
= NULL_TREE
;
12270 rtx mor
= expand_expr_real_2 (&ops
, NULL_RTX
, TYPE_MODE (ops
.type
),
12272 rtx_insn
*moinsns
= get_insns ();
12275 unsigned mocost
= seq_cost (moinsns
, speed_p
);
12276 mocost
+= rtx_cost (mor
, mode
, EQ
, 0, speed_p
);
12277 mocost
+= rtx_cost (expand_normal (*arg1
), mode
, EQ
, 1, speed_p
);
12279 ops
.code
= BIT_AND_EXPR
;
12280 ops
.location
= loc
;
12281 ops
.type
= TREE_TYPE (treeop0
);
12284 ops
.op2
= NULL_TREE
;
12286 rtx mur
= expand_expr_real_2 (&ops
, NULL_RTX
, TYPE_MODE (ops
.type
),
12288 rtx_insn
*muinsns
= get_insns ();
12291 unsigned mucost
= seq_cost (muinsns
, speed_p
);
12292 mucost
+= rtx_cost (mur
, mode
, EQ
, 0, speed_p
);
12293 mucost
+= rtx_cost (expand_normal (c4
), mode
, EQ
, 1, speed_p
);
12295 if (mocost
<= mucost
)
12297 emit_insn (moinsns
);
12298 *arg0
= make_tree (TREE_TYPE (*arg0
), mor
);
12302 emit_insn (muinsns
);
12303 *arg0
= make_tree (TREE_TYPE (*arg0
), mur
);
12308 /* Attempt to optimize unsigned (X % C1) == C2 (or (X % C1) != C2).
12310 (X - C2) * C3 <= C4 (or >), where
12311 C3 is modular multiplicative inverse of C1 and 1<<prec and
12312 C4 is ((1<<prec) - 1) / C1 or ((1<<prec) - 1) / C1 - 1 (the latter
12313 if C2 > ((1<<prec) - 1) % C1).
12314 If C1 is even, S = ctz (C1) and C2 is 0, use
12315 ((X * C3) r>> S) <= C4, where C3 is modular multiplicative
12316 inverse of C1>>S and 1<<prec and C4 is (((1<<prec) - 1) / (C1>>S)) >> S.
12318 For signed (X % C1) == 0 if C1 is odd to (all operations in it
12320 (X * C3) + C4 <= 2 * C4, where
12321 C3 is modular multiplicative inverse of (unsigned) C1 and 1<<prec and
12322 C4 is ((1<<(prec - 1) - 1) / C1).
12323 If C1 is even, S = ctz(C1), use
12324 ((X * C3) + C4) r>> S <= (C4 >> (S - 1))
12325 where C3 is modular multiplicative inverse of (unsigned)(C1>>S) and 1<<prec
12326 and C4 is ((1<<(prec - 1) - 1) / (C1>>S)) & (-1<<S).
12328 See the Hacker's Delight book, section 10-17. */
12330 maybe_optimize_mod_cmp (enum tree_code code
, tree
*arg0
, tree
*arg1
)
12332 gcc_checking_assert (code
== EQ_EXPR
|| code
== NE_EXPR
);
12333 gcc_checking_assert (TREE_CODE (*arg1
) == INTEGER_CST
);
12338 gimple
*stmt
= get_def_for_expr (*arg0
, TRUNC_MOD_EXPR
);
12342 tree treeop0
= gimple_assign_rhs1 (stmt
);
12343 tree treeop1
= gimple_assign_rhs2 (stmt
);
12344 if (TREE_CODE (treeop0
) != SSA_NAME
12345 || TREE_CODE (treeop1
) != INTEGER_CST
12346 /* Don't optimize the undefined behavior case x % 0;
12347 x % 1 should have been optimized into zero, punt if
12348 it makes it here for whatever reason;
12349 x % -c should have been optimized into x % c. */
12350 || compare_tree_int (treeop1
, 2) <= 0
12351 /* Likewise x % c == d where d >= c should be always false. */
12352 || tree_int_cst_le (treeop1
, *arg1
))
12355 /* Unsigned x % pow2 is handled right already, for signed
12356 modulo handle it in maybe_optimize_pow2p_mod_cmp. */
12357 if (integer_pow2p (treeop1
))
12358 return maybe_optimize_pow2p_mod_cmp (code
, arg0
, arg1
);
12360 tree type
= TREE_TYPE (*arg0
);
12361 scalar_int_mode mode
;
12362 if (!is_a
<scalar_int_mode
> (TYPE_MODE (type
), &mode
))
12364 if (GET_MODE_BITSIZE (mode
) != TYPE_PRECISION (type
)
12365 || TYPE_PRECISION (type
) <= 1)
12368 signop sgn
= UNSIGNED
;
12369 /* If both operands are known to have the sign bit clear, handle
12370 even the signed modulo case as unsigned. treeop1 is always
12371 positive >= 2, checked above. */
12372 if (!TYPE_UNSIGNED (type
) && get_range_pos_neg (treeop0
) != 1)
12375 if (!TYPE_UNSIGNED (type
))
12377 if (tree_int_cst_sgn (*arg1
) == -1)
12379 type
= unsigned_type_for (type
);
12380 if (!type
|| TYPE_MODE (type
) != TYPE_MODE (TREE_TYPE (*arg0
)))
12384 int prec
= TYPE_PRECISION (type
);
12385 wide_int w
= wi::to_wide (treeop1
);
12386 int shift
= wi::ctz (w
);
12387 /* Unsigned (X % C1) == C2 is equivalent to (X - C2) % C1 == 0 if
12388 C2 <= -1U % C1, because for any Z >= 0U - C2 in that case (Z % C1) != 0.
12389 If C1 is odd, we can handle all cases by subtracting
12390 C4 below. We could handle even the even C1 and C2 > -1U % C1 cases
12391 e.g. by testing for overflow on the subtraction, punt on that for now
12393 if ((sgn
== SIGNED
|| shift
) && !integer_zerop (*arg1
))
12397 wide_int x
= wi::umod_trunc (wi::mask (prec
, false, prec
), w
);
12398 if (wi::gtu_p (wi::to_wide (*arg1
), x
))
12402 imm_use_iterator imm_iter
;
12403 use_operand_p use_p
;
12404 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, treeop0
)
12406 gimple
*use_stmt
= USE_STMT (use_p
);
12407 /* Punt if treeop0 is used in the same bb in a division
12408 or another modulo with the same divisor. We should expect
12409 the division and modulo combined together. */
12410 if (use_stmt
== stmt
12411 || gimple_bb (use_stmt
) != gimple_bb (stmt
))
12413 if (!is_gimple_assign (use_stmt
)
12414 || (gimple_assign_rhs_code (use_stmt
) != TRUNC_DIV_EXPR
12415 && gimple_assign_rhs_code (use_stmt
) != TRUNC_MOD_EXPR
))
12417 if (gimple_assign_rhs1 (use_stmt
) != treeop0
12418 || !operand_equal_p (gimple_assign_rhs2 (use_stmt
), treeop1
, 0))
12423 w
= wi::lrshift (w
, shift
);
12424 wide_int a
= wide_int::from (w
, prec
+ 1, UNSIGNED
);
12425 wide_int b
= wi::shifted_mask (prec
, 1, false, prec
+ 1);
12426 wide_int m
= wide_int::from (wi::mod_inv (a
, b
), prec
, UNSIGNED
);
12427 tree c3
= wide_int_to_tree (type
, m
);
12428 tree c5
= NULL_TREE
;
12430 if (sgn
== UNSIGNED
)
12432 d
= wi::divmod_trunc (wi::mask (prec
, false, prec
), w
, UNSIGNED
, &e
);
12433 /* Use <= floor ((1<<prec) - 1) / C1 only if C2 <= ((1<<prec) - 1) % C1,
12434 otherwise use < or subtract one from C4. E.g. for
12435 x % 3U == 0 we transform this into x * 0xaaaaaaab <= 0x55555555, but
12436 x % 3U == 1 already needs to be
12437 (x - 1) * 0xaaaaaaabU <= 0x55555554. */
12438 if (!shift
&& wi::gtu_p (wi::to_wide (*arg1
), e
))
12441 d
= wi::lrshift (d
, shift
);
12445 e
= wi::udiv_trunc (wi::mask (prec
- 1, false, prec
), w
);
12447 d
= wi::lshift (e
, 1);
12450 e
= wi::bit_and (e
, wi::mask (shift
, true, prec
));
12451 d
= wi::lrshift (e
, shift
- 1);
12453 c5
= wide_int_to_tree (type
, e
);
12455 tree c4
= wide_int_to_tree (type
, d
);
12457 rtx op0
= expand_normal (treeop0
);
12458 treeop0
= make_tree (TREE_TYPE (treeop0
), op0
);
12460 bool speed_p
= optimize_insn_for_speed_p ();
12462 do_pending_stack_adjust ();
12464 location_t loc
= gimple_location (stmt
);
12465 struct separate_ops ops
;
12466 ops
.code
= TRUNC_MOD_EXPR
;
12467 ops
.location
= loc
;
12468 ops
.type
= TREE_TYPE (treeop0
);
12471 ops
.op2
= NULL_TREE
;
12473 rtx mor
= expand_expr_real_2 (&ops
, NULL_RTX
, TYPE_MODE (ops
.type
),
12475 rtx_insn
*moinsns
= get_insns ();
12478 unsigned mocost
= seq_cost (moinsns
, speed_p
);
12479 mocost
+= rtx_cost (mor
, mode
, EQ
, 0, speed_p
);
12480 mocost
+= rtx_cost (expand_normal (*arg1
), mode
, EQ
, 1, speed_p
);
12482 tree t
= fold_convert_loc (loc
, type
, treeop0
);
12483 if (!integer_zerop (*arg1
))
12484 t
= fold_build2_loc (loc
, MINUS_EXPR
, type
, t
, fold_convert (type
, *arg1
));
12485 t
= fold_build2_loc (loc
, MULT_EXPR
, type
, t
, c3
);
12487 t
= fold_build2_loc (loc
, PLUS_EXPR
, type
, t
, c5
);
12490 tree s
= build_int_cst (NULL_TREE
, shift
);
12491 t
= fold_build2_loc (loc
, RROTATE_EXPR
, type
, t
, s
);
12495 rtx mur
= expand_normal (t
);
12496 rtx_insn
*muinsns
= get_insns ();
12499 unsigned mucost
= seq_cost (muinsns
, speed_p
);
12500 mucost
+= rtx_cost (mur
, mode
, LE
, 0, speed_p
);
12501 mucost
+= rtx_cost (expand_normal (c4
), mode
, LE
, 1, speed_p
);
12503 if (mocost
<= mucost
)
12505 emit_insn (moinsns
);
12506 *arg0
= make_tree (TREE_TYPE (*arg0
), mor
);
12510 emit_insn (muinsns
);
12511 *arg0
= make_tree (type
, mur
);
12513 return code
== EQ_EXPR
? LE_EXPR
: GT_EXPR
;
12516 /* Optimize x - y < 0 into x < 0 if x - y has undefined overflow. */
12519 maybe_optimize_sub_cmp_0 (enum tree_code code
, tree
*arg0
, tree
*arg1
)
12521 gcc_checking_assert (code
== GT_EXPR
|| code
== GE_EXPR
12522 || code
== LT_EXPR
|| code
== LE_EXPR
);
12523 gcc_checking_assert (integer_zerop (*arg1
));
12528 gimple
*stmt
= get_def_for_expr (*arg0
, MINUS_EXPR
);
12532 tree treeop0
= gimple_assign_rhs1 (stmt
);
12533 tree treeop1
= gimple_assign_rhs2 (stmt
);
12534 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (treeop0
)))
12537 if (issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_COMPARISON
))
12538 warning_at (gimple_location (stmt
), OPT_Wstrict_overflow
,
12539 "assuming signed overflow does not occur when "
12540 "simplifying %<X - Y %s 0%> to %<X %s Y%>",
12541 op_symbol_code (code
), op_symbol_code (code
));
12547 /* Generate code to calculate OPS, and exploded expression
12548 using a store-flag instruction and return an rtx for the result.
12549 OPS reflects a comparison.
12551 If TARGET is nonzero, store the result there if convenient.
12553 Return zero if there is no suitable set-flag instruction
12554 available on this machine.
12556 Once expand_expr has been called on the arguments of the comparison,
12557 we are committed to doing the store flag, since it is not safe to
12558 re-evaluate the expression. We emit the store-flag insn by calling
12559 emit_store_flag, but only expand the arguments if we have a reason
12560 to believe that emit_store_flag will be successful. If we think that
12561 it will, but it isn't, we have to simulate the store-flag with a
12562 set/jump/set sequence. */
12565 do_store_flag (sepops ops
, rtx target
, machine_mode mode
)
12567 enum rtx_code code
;
12568 tree arg0
, arg1
, type
;
12569 machine_mode operand_mode
;
12572 rtx subtarget
= target
;
12573 location_t loc
= ops
->location
;
12578 /* Don't crash if the comparison was erroneous. */
12579 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
12582 type
= TREE_TYPE (arg0
);
12583 operand_mode
= TYPE_MODE (type
);
12584 unsignedp
= TYPE_UNSIGNED (type
);
12586 /* We won't bother with BLKmode store-flag operations because it would mean
12587 passing a lot of information to emit_store_flag. */
12588 if (operand_mode
== BLKmode
)
12591 /* We won't bother with store-flag operations involving function pointers
12592 when function pointers must be canonicalized before comparisons. */
12593 if (targetm
.have_canonicalize_funcptr_for_compare ()
12594 && ((POINTER_TYPE_P (TREE_TYPE (arg0
))
12595 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg0
))))
12596 || (POINTER_TYPE_P (TREE_TYPE (arg1
))
12597 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg1
))))))
12603 /* For vector typed comparisons emit code to generate the desired
12604 all-ones or all-zeros mask. */
12605 if (TREE_CODE (ops
->type
) == VECTOR_TYPE
)
12607 tree ifexp
= build2 (ops
->code
, ops
->type
, arg0
, arg1
);
12608 if (VECTOR_BOOLEAN_TYPE_P (ops
->type
)
12609 && expand_vec_cmp_expr_p (TREE_TYPE (arg0
), ops
->type
, ops
->code
))
12610 return expand_vec_cmp_expr (ops
->type
, ifexp
, target
);
12612 gcc_unreachable ();
12615 /* Optimize (x % C1) == C2 or (x % C1) != C2 if it is beneficial
12616 into (x - C2) * C3 < C4. */
12617 if ((ops
->code
== EQ_EXPR
|| ops
->code
== NE_EXPR
)
12618 && TREE_CODE (arg0
) == SSA_NAME
12619 && TREE_CODE (arg1
) == INTEGER_CST
)
12621 enum tree_code new_code
= maybe_optimize_mod_cmp (ops
->code
,
12623 if (new_code
!= ops
->code
)
12625 struct separate_ops nops
= *ops
;
12626 nops
.code
= ops
->code
= new_code
;
12629 nops
.type
= TREE_TYPE (arg0
);
12630 return do_store_flag (&nops
, target
, mode
);
12634 /* Optimize (x - y) < 0 into x < y if x - y has undefined overflow. */
12636 && (ops
->code
== LT_EXPR
|| ops
->code
== LE_EXPR
12637 || ops
->code
== GT_EXPR
|| ops
->code
== GE_EXPR
)
12638 && integer_zerop (arg1
)
12639 && TREE_CODE (arg0
) == SSA_NAME
)
12640 maybe_optimize_sub_cmp_0 (ops
->code
, &arg0
, &arg1
);
12642 /* Get the rtx comparison code to use. We know that EXP is a comparison
12643 operation of some type. Some comparisons against 1 and -1 can be
12644 converted to comparisons with zero. Do so here so that the tests
12645 below will be aware that we have a comparison with zero. These
12646 tests will not catch constants in the first operand, but constants
12647 are rarely passed as the first operand. */
12658 if (integer_onep (arg1
))
12659 arg1
= integer_zero_node
, code
= unsignedp
? LEU
: LE
;
12661 code
= unsignedp
? LTU
: LT
;
12664 if (! unsignedp
&& integer_all_onesp (arg1
))
12665 arg1
= integer_zero_node
, code
= LT
;
12667 code
= unsignedp
? LEU
: LE
;
12670 if (! unsignedp
&& integer_all_onesp (arg1
))
12671 arg1
= integer_zero_node
, code
= GE
;
12673 code
= unsignedp
? GTU
: GT
;
12676 if (integer_onep (arg1
))
12677 arg1
= integer_zero_node
, code
= unsignedp
? GTU
: GT
;
12679 code
= unsignedp
? GEU
: GE
;
12682 case UNORDERED_EXPR
:
12708 gcc_unreachable ();
12711 /* Put a constant second. */
12712 if (TREE_CODE (arg0
) == REAL_CST
|| TREE_CODE (arg0
) == INTEGER_CST
12713 || TREE_CODE (arg0
) == FIXED_CST
)
12715 std::swap (arg0
, arg1
);
12716 code
= swap_condition (code
);
12719 /* If this is an equality or inequality test of a single bit, we can
12720 do this by shifting the bit being tested to the low-order bit and
12721 masking the result with the constant 1. If the condition was EQ,
12722 we xor it with 1. This does not require an scc insn and is faster
12723 than an scc insn even if we have it.
12725 The code to make this transformation was moved into fold_single_bit_test,
12726 so we just call into the folder and expand its result. */
12728 if ((code
== NE
|| code
== EQ
)
12729 && integer_zerop (arg1
)
12730 && (TYPE_PRECISION (ops
->type
) != 1 || TYPE_UNSIGNED (ops
->type
)))
12732 gimple
*srcstmt
= get_def_for_expr (arg0
, BIT_AND_EXPR
);
12734 && integer_pow2p (gimple_assign_rhs2 (srcstmt
)))
12736 enum tree_code tcode
= code
== NE
? NE_EXPR
: EQ_EXPR
;
12737 type
= lang_hooks
.types
.type_for_mode (mode
, unsignedp
);
12738 tree temp
= fold_build2_loc (loc
, BIT_AND_EXPR
, TREE_TYPE (arg1
),
12739 gimple_assign_rhs1 (srcstmt
),
12740 gimple_assign_rhs2 (srcstmt
));
12741 temp
= fold_single_bit_test (loc
, tcode
, temp
, arg1
, type
);
12743 return expand_expr (temp
, target
, VOIDmode
, EXPAND_NORMAL
);
12747 if (! get_subtarget (target
)
12748 || GET_MODE (subtarget
) != operand_mode
)
12751 expand_operands (arg0
, arg1
, subtarget
, &op0
, &op1
, EXPAND_NORMAL
);
12754 target
= gen_reg_rtx (mode
);
12756 /* Try a cstore if possible. */
12757 return emit_store_flag_force (target
, code
, op0
, op1
,
12758 operand_mode
, unsignedp
,
12759 (TYPE_PRECISION (ops
->type
) == 1
12760 && !TYPE_UNSIGNED (ops
->type
)) ? -1 : 1);
12763 /* Attempt to generate a casesi instruction. Returns 1 if successful,
12764 0 otherwise (i.e. if there is no casesi instruction).
12766 DEFAULT_PROBABILITY is the probability of jumping to the default
12769 try_casesi (tree index_type
, tree index_expr
, tree minval
, tree range
,
12770 rtx table_label
, rtx default_label
, rtx fallback_label
,
12771 profile_probability default_probability
)
12773 class expand_operand ops
[5];
12774 scalar_int_mode index_mode
= SImode
;
12775 rtx op1
, op2
, index
;
12777 if (! targetm
.have_casesi ())
12780 /* The index must be some form of integer. Convert it to SImode. */
12781 scalar_int_mode omode
= SCALAR_INT_TYPE_MODE (index_type
);
12782 if (GET_MODE_BITSIZE (omode
) > GET_MODE_BITSIZE (index_mode
))
12784 rtx rangertx
= expand_normal (range
);
12786 /* We must handle the endpoints in the original mode. */
12787 index_expr
= build2 (MINUS_EXPR
, index_type
,
12788 index_expr
, minval
);
12789 minval
= integer_zero_node
;
12790 index
= expand_normal (index_expr
);
12792 emit_cmp_and_jump_insns (rangertx
, index
, LTU
, NULL_RTX
,
12793 omode
, 1, default_label
,
12794 default_probability
);
12795 /* Now we can safely truncate. */
12796 index
= convert_to_mode (index_mode
, index
, 0);
12800 if (omode
!= index_mode
)
12802 index_type
= lang_hooks
.types
.type_for_mode (index_mode
, 0);
12803 index_expr
= fold_convert (index_type
, index_expr
);
12806 index
= expand_normal (index_expr
);
12809 do_pending_stack_adjust ();
12811 op1
= expand_normal (minval
);
12812 op2
= expand_normal (range
);
12814 create_input_operand (&ops
[0], index
, index_mode
);
12815 create_convert_operand_from_type (&ops
[1], op1
, TREE_TYPE (minval
));
12816 create_convert_operand_from_type (&ops
[2], op2
, TREE_TYPE (range
));
12817 create_fixed_operand (&ops
[3], table_label
);
12818 create_fixed_operand (&ops
[4], (default_label
12820 : fallback_label
));
12821 expand_jump_insn (targetm
.code_for_casesi
, 5, ops
);
12825 /* Attempt to generate a tablejump instruction; same concept. */
12826 /* Subroutine of the next function.
12828 INDEX is the value being switched on, with the lowest value
12829 in the table already subtracted.
12830 MODE is its expected mode (needed if INDEX is constant).
12831 RANGE is the length of the jump table.
12832 TABLE_LABEL is a CODE_LABEL rtx for the table itself.
12834 DEFAULT_LABEL is a CODE_LABEL rtx to jump to if the
12835 index value is out of range.
12836 DEFAULT_PROBABILITY is the probability of jumping to
12837 the default label. */
12840 do_tablejump (rtx index
, machine_mode mode
, rtx range
, rtx table_label
,
12841 rtx default_label
, profile_probability default_probability
)
12845 if (INTVAL (range
) > cfun
->cfg
->max_jumptable_ents
)
12846 cfun
->cfg
->max_jumptable_ents
= INTVAL (range
);
12848 /* Do an unsigned comparison (in the proper mode) between the index
12849 expression and the value which represents the length of the range.
12850 Since we just finished subtracting the lower bound of the range
12851 from the index expression, this comparison allows us to simultaneously
12852 check that the original index expression value is both greater than
12853 or equal to the minimum value of the range and less than or equal to
12854 the maximum value of the range. */
12857 emit_cmp_and_jump_insns (index
, range
, GTU
, NULL_RTX
, mode
, 1,
12858 default_label
, default_probability
);
12860 /* If index is in range, it must fit in Pmode.
12861 Convert to Pmode so we can index with it. */
12864 unsigned int width
;
12866 /* We know the value of INDEX is between 0 and RANGE. If we have a
12867 sign-extended subreg, and RANGE does not have the sign bit set, then
12868 we have a value that is valid for both sign and zero extension. In
12869 this case, we get better code if we sign extend. */
12870 if (GET_CODE (index
) == SUBREG
12871 && SUBREG_PROMOTED_VAR_P (index
)
12872 && SUBREG_PROMOTED_SIGNED_P (index
)
12873 && ((width
= GET_MODE_PRECISION (as_a
<scalar_int_mode
> (mode
)))
12874 <= HOST_BITS_PER_WIDE_INT
)
12875 && ! (UINTVAL (range
) & (HOST_WIDE_INT_1U
<< (width
- 1))))
12876 index
= convert_to_mode (Pmode
, index
, 0);
12878 index
= convert_to_mode (Pmode
, index
, 1);
12881 /* Don't let a MEM slip through, because then INDEX that comes
12882 out of PIC_CASE_VECTOR_ADDRESS won't be a valid address,
12883 and break_out_memory_refs will go to work on it and mess it up. */
12884 #ifdef PIC_CASE_VECTOR_ADDRESS
12885 if (flag_pic
&& !REG_P (index
))
12886 index
= copy_to_mode_reg (Pmode
, index
);
12889 /* ??? The only correct use of CASE_VECTOR_MODE is the one inside the
12890 GET_MODE_SIZE, because this indicates how large insns are. The other
12891 uses should all be Pmode, because they are addresses. This code
12892 could fail if addresses and insns are not the same size. */
12893 index
= simplify_gen_binary (MULT
, Pmode
, index
,
12894 gen_int_mode (GET_MODE_SIZE (CASE_VECTOR_MODE
),
12896 index
= simplify_gen_binary (PLUS
, Pmode
, index
,
12897 gen_rtx_LABEL_REF (Pmode
, table_label
));
12899 #ifdef PIC_CASE_VECTOR_ADDRESS
12901 index
= PIC_CASE_VECTOR_ADDRESS (index
);
12904 index
= memory_address (CASE_VECTOR_MODE
, index
);
12905 temp
= gen_reg_rtx (CASE_VECTOR_MODE
);
12906 vector
= gen_const_mem (CASE_VECTOR_MODE
, index
);
12907 convert_move (temp
, vector
, 0);
12909 emit_jump_insn (targetm
.gen_tablejump (temp
, table_label
));
12911 /* If we are generating PIC code or if the table is PC-relative, the
12912 table and JUMP_INSN must be adjacent, so don't output a BARRIER. */
12913 if (! CASE_VECTOR_PC_RELATIVE
&& ! flag_pic
)
12918 try_tablejump (tree index_type
, tree index_expr
, tree minval
, tree range
,
12919 rtx table_label
, rtx default_label
,
12920 profile_probability default_probability
)
12924 if (! targetm
.have_tablejump ())
12927 index_expr
= fold_build2 (MINUS_EXPR
, index_type
,
12928 fold_convert (index_type
, index_expr
),
12929 fold_convert (index_type
, minval
));
12930 index
= expand_normal (index_expr
);
12931 do_pending_stack_adjust ();
12933 do_tablejump (index
, TYPE_MODE (index_type
),
12934 convert_modes (TYPE_MODE (index_type
),
12935 TYPE_MODE (TREE_TYPE (range
)),
12936 expand_normal (range
),
12937 TYPE_UNSIGNED (TREE_TYPE (range
))),
12938 table_label
, default_label
, default_probability
);
12942 /* Return a CONST_VECTOR rtx representing vector mask for
12943 a VECTOR_CST of booleans. */
12945 const_vector_mask_from_tree (tree exp
)
12947 machine_mode mode
= TYPE_MODE (TREE_TYPE (exp
));
12948 machine_mode inner
= GET_MODE_INNER (mode
);
12950 rtx_vector_builder
builder (mode
, VECTOR_CST_NPATTERNS (exp
),
12951 VECTOR_CST_NELTS_PER_PATTERN (exp
));
12952 unsigned int count
= builder
.encoded_nelts ();
12953 for (unsigned int i
= 0; i
< count
; ++i
)
12955 tree elt
= VECTOR_CST_ELT (exp
, i
);
12956 gcc_assert (TREE_CODE (elt
) == INTEGER_CST
);
12957 if (integer_zerop (elt
))
12958 builder
.quick_push (CONST0_RTX (inner
));
12959 else if (integer_onep (elt
)
12960 || integer_minus_onep (elt
))
12961 builder
.quick_push (CONSTM1_RTX (inner
));
12963 gcc_unreachable ();
12965 return builder
.build ();
12968 /* Return a CONST_VECTOR rtx for a VECTOR_CST tree. */
12970 const_vector_from_tree (tree exp
)
12972 machine_mode mode
= TYPE_MODE (TREE_TYPE (exp
));
12974 if (initializer_zerop (exp
))
12975 return CONST0_RTX (mode
);
12977 if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp
)))
12978 return const_vector_mask_from_tree (exp
);
12980 machine_mode inner
= GET_MODE_INNER (mode
);
12982 rtx_vector_builder
builder (mode
, VECTOR_CST_NPATTERNS (exp
),
12983 VECTOR_CST_NELTS_PER_PATTERN (exp
));
12984 unsigned int count
= builder
.encoded_nelts ();
12985 for (unsigned int i
= 0; i
< count
; ++i
)
12987 tree elt
= VECTOR_CST_ELT (exp
, i
);
12988 if (TREE_CODE (elt
) == REAL_CST
)
12989 builder
.quick_push (const_double_from_real_value (TREE_REAL_CST (elt
),
12991 else if (TREE_CODE (elt
) == FIXED_CST
)
12992 builder
.quick_push (CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt
),
12995 builder
.quick_push (immed_wide_int_const (wi::to_poly_wide (elt
),
12998 return builder
.build ();
13001 /* Build a decl for a personality function given a language prefix. */
13004 build_personality_function (const char *lang
)
13006 const char *unwind_and_version
;
13010 switch (targetm_common
.except_unwind_info (&global_options
))
13015 unwind_and_version
= "_sj0";
13019 unwind_and_version
= "_v0";
13022 unwind_and_version
= "_seh0";
13025 gcc_unreachable ();
13028 name
= ACONCAT (("__", lang
, "_personality", unwind_and_version
, NULL
));
13030 type
= build_function_type_list (unsigned_type_node
,
13031 integer_type_node
, integer_type_node
,
13032 long_long_unsigned_type_node
,
13033 ptr_type_node
, ptr_type_node
, NULL_TREE
);
13034 decl
= build_decl (UNKNOWN_LOCATION
, FUNCTION_DECL
,
13035 get_identifier (name
), type
);
13036 DECL_ARTIFICIAL (decl
) = 1;
13037 DECL_EXTERNAL (decl
) = 1;
13038 TREE_PUBLIC (decl
) = 1;
13040 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
13041 are the flags assigned by targetm.encode_section_info. */
13042 SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl
), 0), NULL
);
13047 /* Extracts the personality function of DECL and returns the corresponding
13051 get_personality_function (tree decl
)
13053 tree personality
= DECL_FUNCTION_PERSONALITY (decl
);
13054 enum eh_personality_kind pk
;
13056 pk
= function_needs_eh_personality (DECL_STRUCT_FUNCTION (decl
));
13057 if (pk
== eh_personality_none
)
13061 && pk
== eh_personality_any
)
13062 personality
= lang_hooks
.eh_personality ();
13064 if (pk
== eh_personality_lang
)
13065 gcc_assert (personality
!= NULL_TREE
);
13067 return XEXP (DECL_RTL (personality
), 0);
13070 /* Returns a tree for the size of EXP in bytes. */
13073 tree_expr_size (const_tree exp
)
13076 && DECL_SIZE_UNIT (exp
) != 0)
13077 return DECL_SIZE_UNIT (exp
);
13079 return size_in_bytes (TREE_TYPE (exp
));
13082 /* Return an rtx for the size in bytes of the value of EXP. */
13085 expr_size (tree exp
)
13089 if (TREE_CODE (exp
) == WITH_SIZE_EXPR
)
13090 size
= TREE_OPERAND (exp
, 1);
13093 size
= tree_expr_size (exp
);
13095 gcc_assert (size
== SUBSTITUTE_PLACEHOLDER_IN_EXPR (size
, exp
));
13098 return expand_expr (size
, NULL_RTX
, TYPE_MODE (sizetype
), EXPAND_NORMAL
);
13101 /* Return a wide integer for the size in bytes of the value of EXP, or -1
13102 if the size can vary or is larger than an integer. */
13104 static HOST_WIDE_INT
13105 int_expr_size (tree exp
)
13109 if (TREE_CODE (exp
) == WITH_SIZE_EXPR
)
13110 size
= TREE_OPERAND (exp
, 1);
13113 size
= tree_expr_size (exp
);
13117 if (size
== 0 || !tree_fits_shwi_p (size
))
13120 return tree_to_shwi (size
);