/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "dojump.h"
#include "explow.h"
#include "expr.h"
#include "langhooks.h"
struct target_expmed default_target_expmed;
#if SWITCHABLE_TARGET
struct target_expmed *this_target_expmed = &default_target_expmed;
#endif
static void store_fixed_bit_field (rtx, opt_scalar_int_mode,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   rtx, scalar_int_mode, bool);
static void store_fixed_bit_field_1 (rtx, scalar_int_mode,
                                     unsigned HOST_WIDE_INT,
                                     unsigned HOST_WIDE_INT,
                                     rtx, scalar_int_mode, bool);
static void store_split_bit_field (rtx, opt_scalar_int_mode,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   rtx, scalar_int_mode, bool);
static rtx extract_fixed_bit_field (machine_mode, rtx, opt_scalar_int_mode,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int, bool);
static rtx extract_fixed_bit_field_1 (machine_mode, rtx, scalar_int_mode,
                                      unsigned HOST_WIDE_INT,
                                      unsigned HOST_WIDE_INT, rtx, int, bool);
static rtx lshift_value (machine_mode, unsigned HOST_WIDE_INT, int);
static rtx extract_split_bit_field (rtx, opt_scalar_int_mode,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int, bool);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, machine_mode,
                             rtx_code_label *);
static rtx expand_smod_pow2 (scalar_int_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (scalar_int_mode, rtx, HOST_WIDE_INT);
/* Return a constant integer mask value of mode MODE with BITSIZE ones
   followed by BITPOS zeros, or the complement of that if COMPLEMENT.
   The mask is truncated if necessary to the width of mode MODE.  The
   mask is zero-extended if BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (scalar_int_mode mode, int bitpos, int bitsize, bool complement)
{
  return immed_wide_int_const
    (wi::shifted_mask (bitpos, bitsize, complement,
                       GET_MODE_PRECISION (mode)), mode);
}
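/* As a worked example of the above: mask_rtx (SImode, 8, 4, false)
   yields the shifted mask 0xf00 (four ones starting at bit 8), while
   mask_rtx (SImode, 8, 4, true) yields its complement 0xfffff0ff.  */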
/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) \
  (((x) & ((x) - HOST_WIDE_INT_1U)) == 0)
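/* Subtracting 1 clears the lowest set bit and sets every bit below it,
   so x & (x - 1) is zero exactly when x has at most one bit set; e.g.
   the macro is true for 0 and 8, but false for 12 (12 & 11 == 8).  */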
struct init_expmed_rtl
{
  rtx reg;
  rtx plus;
  rtx neg;
  rtx mult;
  rtx sdiv;
  rtx udiv;
  rtx sdiv_32;
  rtx smod_32;
  rtx wide_mult;
  rtx wide_lshr;
  rtx wide_trunc;
  rtx shift;
  rtx shift_mult;
  rtx shift_add;
  rtx shift_sub0;
  rtx shift_sub1;
  rtx zext;
  rtx trunc;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
};
static void
init_expmed_one_conv (struct init_expmed_rtl *all, scalar_int_mode to_mode,
                      scalar_int_mode from_mode, bool speed)
{
  int to_size, from_size;
  rtx which;

  to_size = GET_MODE_PRECISION (to_mode);
  from_size = GET_MODE_PRECISION (from_mode);

  /* Most partial integers have a precision less than that of the "full"
     integer required for their storage.  In case one doesn't, for
     comparison purposes here, reduce the bit size by one in that
     case.  */
  if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT
      && pow2p_hwi (to_size))
    to_size --;
  if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT
      && pow2p_hwi (from_size))
    from_size --;

  /* Assume cost of zero-extend and sign-extend is the same.  */
  which = (to_size < from_size ? all->trunc : all->zext);

  PUT_MODE (all->reg, from_mode);
  set_convert_cost (to_mode, from_mode, speed,
                    set_src_cost (which, to_mode, speed));
}
static void
init_expmed_one_mode (struct init_expmed_rtl *all,
                      machine_mode mode, int speed)
{
  int m, n, mode_bitsize;
  machine_mode mode_from;

  mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);

  PUT_MODE (all->reg, mode);
  PUT_MODE (all->plus, mode);
  PUT_MODE (all->neg, mode);
  PUT_MODE (all->mult, mode);
  PUT_MODE (all->sdiv, mode);
  PUT_MODE (all->udiv, mode);
  PUT_MODE (all->sdiv_32, mode);
  PUT_MODE (all->smod_32, mode);
  PUT_MODE (all->wide_trunc, mode);
  PUT_MODE (all->shift, mode);
  PUT_MODE (all->shift_mult, mode);
  PUT_MODE (all->shift_add, mode);
  PUT_MODE (all->shift_sub0, mode);
  PUT_MODE (all->shift_sub1, mode);
  PUT_MODE (all->zext, mode);
  PUT_MODE (all->trunc, mode);

  set_add_cost (speed, mode, set_src_cost (all->plus, mode, speed));
  set_neg_cost (speed, mode, set_src_cost (all->neg, mode, speed));
  set_mul_cost (speed, mode, set_src_cost (all->mult, mode, speed));
  set_sdiv_cost (speed, mode, set_src_cost (all->sdiv, mode, speed));
  set_udiv_cost (speed, mode, set_src_cost (all->udiv, mode, speed));

  set_sdiv_pow2_cheap (speed, mode, (set_src_cost (all->sdiv_32, mode, speed)
                                     <= 2 * add_cost (speed, mode)));
  set_smod_pow2_cheap (speed, mode, (set_src_cost (all->smod_32, mode, speed)
                                     <= 4 * add_cost (speed, mode)));

  set_shift_cost (speed, mode, 0, 0);
  {
    int cost = add_cost (speed, mode);
    set_shiftadd_cost (speed, mode, 0, cost);
    set_shiftsub0_cost (speed, mode, 0, cost);
    set_shiftsub1_cost (speed, mode, 0, cost);
  }

  n = MIN (MAX_BITS_PER_WORD, mode_bitsize);
  for (m = 1; m < n; m++)
    {
      XEXP (all->shift, 1) = all->cint[m];
      XEXP (all->shift_mult, 1) = all->pow2[m];

      set_shift_cost (speed, mode, m, set_src_cost (all->shift, mode, speed));
      set_shiftadd_cost (speed, mode, m, set_src_cost (all->shift_add, mode,
                                                       speed));
      set_shiftsub0_cost (speed, mode, m, set_src_cost (all->shift_sub0, mode,
                                                        speed));
      set_shiftsub1_cost (speed, mode, m, set_src_cost (all->shift_sub1, mode,
                                                        speed));
    }

  scalar_int_mode int_mode_to;
  if (is_a <scalar_int_mode> (mode, &int_mode_to))
    {
      for (mode_from = MIN_MODE_INT; mode_from <= MAX_MODE_INT;
           mode_from = (machine_mode)(mode_from + 1))
        init_expmed_one_conv (all, int_mode_to,
                              as_a <scalar_int_mode> (mode_from), speed);

      scalar_int_mode wider_mode;
      if (GET_MODE_CLASS (int_mode_to) == MODE_INT
          && GET_MODE_WIDER_MODE (int_mode_to).exists (&wider_mode))
        {
          PUT_MODE (all->zext, wider_mode);
          PUT_MODE (all->wide_mult, wider_mode);
          PUT_MODE (all->wide_lshr, wider_mode);
          XEXP (all->wide_lshr, 1) = GEN_INT (mode_bitsize);

          set_mul_widen_cost (speed, wider_mode,
                              set_src_cost (all->wide_mult, wider_mode, speed));
          set_mul_highpart_cost (speed, int_mode_to,
                                 set_src_cost (all->wide_trunc,
                                               int_mode_to, speed));
        }
    }
}
void
init_expmed (void)
{
  struct init_expmed_rtl all;
  machine_mode mode = QImode;
  int m, speed;

  memset (&all, 0, sizeof all);
  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      all.pow2[m] = GEN_INT (HOST_WIDE_INT_1 << m);
      all.cint[m] = GEN_INT (m);
    }

  /* Avoid using hard regs in ways which may be unsupported.  */
  all.reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
  all.plus = gen_rtx_PLUS (mode, all.reg, all.reg);
  all.neg = gen_rtx_NEG (mode, all.reg);
  all.mult = gen_rtx_MULT (mode, all.reg, all.reg);
  all.sdiv = gen_rtx_DIV (mode, all.reg, all.reg);
  all.udiv = gen_rtx_UDIV (mode, all.reg, all.reg);
  all.sdiv_32 = gen_rtx_DIV (mode, all.reg, all.pow2[5]);
  all.smod_32 = gen_rtx_MOD (mode, all.reg, all.pow2[5]);
  all.zext = gen_rtx_ZERO_EXTEND (mode, all.reg);
  all.wide_mult = gen_rtx_MULT (mode, all.zext, all.zext);
  all.wide_lshr = gen_rtx_LSHIFTRT (mode, all.wide_mult, all.reg);
  all.wide_trunc = gen_rtx_TRUNCATE (mode, all.wide_lshr);
  all.shift = gen_rtx_ASHIFT (mode, all.reg, all.reg);
  all.shift_mult = gen_rtx_MULT (mode, all.reg, all.reg);
  all.shift_add = gen_rtx_PLUS (mode, all.shift_mult, all.reg);
  all.shift_sub0 = gen_rtx_MINUS (mode, all.shift_mult, all.reg);
  all.shift_sub1 = gen_rtx_MINUS (mode, all.reg, all.shift_mult);
  all.trunc = gen_rtx_TRUNCATE (mode, all.reg);

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      set_zero_cost (speed, set_src_cost (const0_rtx, mode, speed));

      for (mode = MIN_MODE_INT; mode <= MAX_MODE_INT;
           mode = (machine_mode)(mode + 1))
        init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_PARTIAL_INT != VOIDmode)
        for (mode = MIN_MODE_PARTIAL_INT; mode <= MAX_MODE_PARTIAL_INT;
             mode = (machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_VECTOR_INT != VOIDmode)
        for (mode = MIN_MODE_VECTOR_INT; mode <= MAX_MODE_VECTOR_INT;
             mode = (machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);
    }

  if (alg_hash_used_p ())
    {
      struct alg_hash_entry *p = alg_hash_entry_ptr (0);
      memset (p, 0, sizeof (*p) * NUM_ALG_HASH_ENTRIES);
    }
  else
    set_alg_hash_used_p (true);
  default_rtl_profile ();

  ggc_free (all.trunc);
  ggc_free (all.shift_sub1);
  ggc_free (all.shift_sub0);
  ggc_free (all.shift_add);
  ggc_free (all.shift_mult);
  ggc_free (all.shift);
  ggc_free (all.wide_trunc);
  ggc_free (all.wide_lshr);
  ggc_free (all.wide_mult);
  ggc_free (all.zext);
  ggc_free (all.smod_32);
  ggc_free (all.sdiv_32);
  ggc_free (all.udiv);
  ggc_free (all.sdiv);
  ggc_free (all.mult);
  ggc_free (all.neg);
  ggc_free (all.plus);
  ggc_free (all.reg);
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
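/* For a constant operand the simplifier folds the negation directly;
   e.g. negate_rtx (SImode, const1_rtx) yields (const_int -1) without
   emitting any insns, whereas a REG operand expands to a neg insn.  */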
/* Whether reverse storage order is supported on the target.  */
static int reverse_storage_order_supported = -1;

/* Check whether reverse storage order is supported on the target.  */

static void
check_reverse_storage_order_support (void)
{
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    {
      reverse_storage_order_supported = 0;
      sorry ("reverse scalar storage order");
    }
  else
    reverse_storage_order_supported = 1;
}

/* Whether reverse FP storage order is supported on the target.  */
static int reverse_float_storage_order_supported = -1;

/* Check whether reverse FP storage order is supported on the target.  */

static void
check_reverse_float_storage_order_support (void)
{
  if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    {
      reverse_float_storage_order_supported = 0;
      sorry ("reverse floating-point scalar storage order");
    }
  else
    reverse_float_storage_order_supported = 1;
}
/* Return an rtx representing value of X with reverse storage order.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
flip_storage_order (machine_mode mode, rtx x)
{
  scalar_int_mode int_mode;
  rtx result;

  if (mode == QImode)
    return x;

  if (COMPLEX_MODE_P (mode))
    {
      rtx real = read_complex_part (x, false);
      rtx imag = read_complex_part (x, true);

      real = flip_storage_order (GET_MODE_INNER (mode), real);
      imag = flip_storage_order (GET_MODE_INNER (mode), imag);

      return gen_rtx_CONCAT (mode, real, imag);
    }

  if (__builtin_expect (reverse_storage_order_supported < 0, 0))
    check_reverse_storage_order_support ();

  if (!is_a <scalar_int_mode> (mode, &int_mode))
    {
      if (FLOAT_MODE_P (mode)
          && __builtin_expect (reverse_float_storage_order_supported < 0, 0))
        check_reverse_float_storage_order_support ();

      if (!int_mode_for_size (GET_MODE_PRECISION (mode), 0).exists (&int_mode))
        {
          sorry ("reverse storage order for %smode", GET_MODE_NAME (mode));
          return x;
        }
      x = gen_lowpart (int_mode, x);
    }

  result = simplify_unary_operation (BSWAP, int_mode, x, int_mode);
  if (result == 0)
    result = expand_unop (int_mode, bswap_optab, x, NULL_RTX, 1);

  if (int_mode != mode)
    result = gen_lowpart (mode, result);

  return result;
}
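/* On a scalar integer this amounts to a byte swap: flipping the SImode
   value 0x12345678 yields 0x78563412.  Modes with no integer mode of
   the same precision are rejected with a sorry () above.  */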
/* If MODE is set, adjust bitfield memory MEM so that it points to the
   first unit of mode MODE that contains a bitfield of size BITSIZE at
   bit position BITNUM.  If MODE is not set, return a BLKmode reference
   to every byte in the bitfield.  Set *NEW_BITNUM to the bit position
   of the field within the new memory.  */

static rtx
narrow_bit_field_mem (rtx mem, opt_scalar_int_mode mode,
                      unsigned HOST_WIDE_INT bitsize,
                      unsigned HOST_WIDE_INT bitnum,
                      unsigned HOST_WIDE_INT *new_bitnum)
{
  scalar_int_mode imode;
  if (mode.exists (&imode))
    {
      unsigned int unit = GET_MODE_BITSIZE (imode);
      *new_bitnum = bitnum % unit;
      HOST_WIDE_INT offset = (bitnum - *new_bitnum) / BITS_PER_UNIT;
      return adjust_bitfield_address (mem, imode, offset);
    }
  else
    {
      *new_bitnum = bitnum % BITS_PER_UNIT;
      HOST_WIDE_INT offset = bitnum / BITS_PER_UNIT;
      HOST_WIDE_INT size = ((*new_bitnum + bitsize + BITS_PER_UNIT - 1)
                            / BITS_PER_UNIT);
      return adjust_bitfield_address_size (mem, BLKmode, offset, size);
    }
}
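/* For example, narrowing a reference to an SImode unit for a field at
   bit position 37 gives *NEW_BITNUM = 37 % 32 = 5 and a byte offset of
   (37 - 5) / 8 = 4, i.e. the second 32-bit unit of the original MEM.  */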
/* The caller wants to perform insertion or extraction PATTERN on a
   bitfield of size BITSIZE at BITNUM bits into memory operand OP0.
   BITREGION_START and BITREGION_END are as for store_bit_field
   and FIELDMODE is the natural mode of the field.

   Search for a mode that is compatible with the memory access
   restrictions and (where applicable) with a register insertion or
   extraction.  Return the new memory on success, storing the adjusted
   bit position in *NEW_BITNUM.  Return null otherwise.  */

static rtx
adjust_bit_field_mem_for_reg (enum extraction_pattern pattern,
                              rtx op0, HOST_WIDE_INT bitsize,
                              HOST_WIDE_INT bitnum,
                              unsigned HOST_WIDE_INT bitregion_start,
                              unsigned HOST_WIDE_INT bitregion_end,
                              machine_mode fieldmode,
                              unsigned HOST_WIDE_INT *new_bitnum)
{
  bit_field_mode_iterator iter (bitsize, bitnum, bitregion_start,
                                bitregion_end, MEM_ALIGN (op0),
                                MEM_VOLATILE_P (op0));
  scalar_int_mode best_mode;
  if (iter.next_mode (&best_mode))
    {
      /* We can use a memory in BEST_MODE.  See whether this is true for
         any wider modes.  All other things being equal, we prefer to
         use the widest mode possible because it tends to expose more
         CSE opportunities.  */
      if (!iter.prefer_smaller_modes ())
        {
          /* Limit the search to the mode required by the corresponding
             register insertion or extraction instruction, if any.  */
          scalar_int_mode limit_mode = word_mode;
          extraction_insn insn;
          if (get_best_reg_extraction_insn (&insn, pattern,
                                            GET_MODE_BITSIZE (best_mode),
                                            fieldmode))
            limit_mode = insn.field_mode;

          scalar_int_mode wider_mode;
          while (iter.next_mode (&wider_mode)
                 && GET_MODE_SIZE (wider_mode) <= GET_MODE_SIZE (limit_mode))
            best_mode = wider_mode;
        }
      return narrow_bit_field_mem (op0, best_mode, bitsize, bitnum,
                                   new_bitnum);
    }
  return NULL_RTX;
}
/* Return true if a bitfield of size BITSIZE at bit number BITNUM within
   a structure of mode STRUCT_MODE represents a lowpart subreg.  The subreg
   offset is then BITNUM / BITS_PER_UNIT.  */

static bool
lowpart_bit_field_p (unsigned HOST_WIDE_INT bitnum,
                     unsigned HOST_WIDE_INT bitsize,
                     machine_mode struct_mode)
{
  unsigned HOST_WIDE_INT regsize = REGMODE_NATURAL_SIZE (struct_mode);
  if (BYTES_BIG_ENDIAN)
    return (bitnum % BITS_PER_UNIT == 0
            && (bitnum + bitsize == GET_MODE_BITSIZE (struct_mode)
                || (bitnum + bitsize) % (regsize * BITS_PER_UNIT) == 0));
  else
    return bitnum % (regsize * BITS_PER_UNIT) == 0;
}
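/* For instance, on a little-endian target whose registers have a 4-byte
   natural size, a field starting at bit 32 of a DImode value is a
   lowpart (32 % 32 == 0), whereas one starting at bit 8 is not.  */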
/* Return true if -fstrict-volatile-bitfields applies to an access of OP0
   containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.
   Return false if the access would touch memory outside the range
   BITREGION_START to BITREGION_END for conformance to the C++ memory
   model.  */

static bool
strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
                            unsigned HOST_WIDE_INT bitnum,
                            scalar_int_mode fieldmode,
                            unsigned HOST_WIDE_INT bitregion_start,
                            unsigned HOST_WIDE_INT bitregion_end)
{
  unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);

  /* -fstrict-volatile-bitfields must be enabled and we must have a
     volatile MEM.  */
  if (!MEM_P (op0)
      || !MEM_VOLATILE_P (op0)
      || flag_strict_volatile_bitfields <= 0)
    return false;

  /* The bit size must not be larger than the field mode, and
     the field mode must not be larger than a word.  */
  if (bitsize > modesize || modesize > BITS_PER_WORD)
    return false;

  /* Check for cases of unaligned fields that must be split.  */
  if (bitnum % modesize + bitsize > modesize)
    return false;

  /* The memory must be sufficiently aligned for a MODESIZE access.
     This condition guarantees that the memory access will not
     touch anything after the end of the structure.  */
  if (MEM_ALIGN (op0) < modesize)
    return false;

  /* Check for cases where the C++ memory model applies.  */
  if (bitregion_end != 0
      && (bitnum - bitnum % modesize < bitregion_start
          || bitnum - bitnum % modesize + modesize - 1 > bitregion_end))
    return false;

  return true;
}
/* Return true if OP is a memory and if a bitfield of size BITSIZE at
   bit number BITNUM can be treated as a simple value of mode MODE.  */

static bool
simple_mem_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitnum, machine_mode mode)
{
  return (MEM_P (op0)
          && bitnum % BITS_PER_UNIT == 0
          && bitsize == GET_MODE_BITSIZE (mode)
          && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (op0))
              || (bitnum % GET_MODE_ALIGNMENT (mode) == 0
                  && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode))));
}
/* Try to use instruction INSV to store VALUE into a field of OP0.
   If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is a
   BLKmode MEM.  VALUE_MODE is the mode of VALUE.  BITSIZE and BITNUM
   are as for store_bit_field.  */

static bool
store_bit_field_using_insv (const extraction_insn *insv, rtx op0,
                            opt_scalar_int_mode op0_mode,
                            unsigned HOST_WIDE_INT bitsize,
                            unsigned HOST_WIDE_INT bitnum,
                            rtx value, scalar_int_mode value_mode)
{
  struct expand_operand ops[4];
  rtx value1;
  rtx xop0 = op0;
  rtx_insn *last = get_last_insn ();
  bool copy_back = false;

  scalar_int_mode op_mode = insv->field_mode;
  unsigned int unit = GET_MODE_BITSIZE (op_mode);
  if (bitsize == 0 || bitsize > unit)
    return false;

  if (MEM_P (xop0))
    /* Get a reference to the first byte of the field.  */
    xop0 = narrow_bit_field_mem (xop0, insv->struct_mode, bitsize, bitnum,
                                 &bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in OP_MODE.  */
      if (BYTES_BIG_ENDIAN)
        bitnum += unit - GET_MODE_BITSIZE (op0_mode.require ());

      /* If xop0 is a register, we need it in OP_MODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
        xop0 = gen_lowpart_SUBREG (op_mode, xop0);
    }

  /* If the destination is a paradoxical subreg such that we need a
     truncate to the inner mode, perform the insertion on a temporary and
     truncate the result to the original destination.  Note that we can't
     just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
     X) 0)) is (reg:N X).  */
  if (GET_CODE (xop0) == SUBREG
      && REG_P (SUBREG_REG (xop0))
      && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (SUBREG_REG (xop0)),
                                         op_mode))
    {
      rtx tem = gen_reg_rtx (op_mode);
      emit_move_insn (tem, xop0);
      xop0 = tem;
      copy_back = true;
    }

  /* There is a similar overflow check at the start of store_bit_field_1,
     but that one only handles the case where the field lies completely
     outside the register.  There are also cases where the field lies
     partially in the register, and then we need to adjust BITSIZE for
     the partial overflow.  Without this fix, pr48335-2.c is broken on
     big-endian targets that have bit-insert instructions, such as arm
     and aarch64.  */
  if (bitsize + bitnum > unit && bitnum < unit)
    {
      warning (OPT_Wextra, "write of %wu-bit data outside the bound of "
               "destination object, data truncated into %wu-bit",
               bitsize, unit - bitnum);
      bitsize = unit - bitnum;
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are inserting into.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;

  /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
  value1 = value;
  if (value_mode != op_mode)
    {
      if (GET_MODE_BITSIZE (value_mode) >= bitsize)
        {
          rtx tmp;
          /* Optimization: Don't bother really extending VALUE
             if it has all the bits we will actually use.  However,
             if we must narrow it, be sure we do it correctly.  */

          if (GET_MODE_SIZE (value_mode) < GET_MODE_SIZE (op_mode))
            {
              tmp = simplify_subreg (op_mode, value1, value_mode, 0);
              if (! tmp)
                tmp = simplify_gen_subreg (op_mode,
                                           force_reg (value_mode, value1),
                                           value_mode, 0);
            }
          else
            {
              tmp = gen_lowpart_if_possible (op_mode, value1);
              if (! tmp)
                tmp = gen_lowpart (op_mode, force_reg (value_mode, value1));
            }
          value1 = tmp;
        }
      else if (CONST_INT_P (value))
        value1 = gen_int_mode (INTVAL (value), op_mode);
      else
        /* Parse phase is supposed to make VALUE's data type
           match that of the component reference, which is a type
           at least as wide as the field; so VALUE should have
           a mode that corresponds to that type.  */
        gcc_assert (CONSTANT_P (value));
    }

  create_fixed_operand (&ops[0], xop0);
  create_integer_operand (&ops[1], bitsize);
  create_integer_operand (&ops[2], bitnum);
  create_input_operand (&ops[3], value1, op_mode);
  if (maybe_expand_insn (insv->icode, 4, ops))
    {
      if (copy_back)
        convert_move (op0, xop0, true);
      return true;
    }
  delete_insns_since (last);
  return false;
}
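/* To illustrate the bit-numbering conversion above: inserting a 4-bit
   field at lsb-based position 8 into a 32-bit OP_MODE unit on a target
   where BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN passes bitnum
   32 - 4 - 8 = 20 to the insv pattern.  */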
/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum,
                   unsigned HOST_WIDE_INT bitregion_start,
                   unsigned HOST_WIDE_INT bitregion_end,
                   machine_mode fieldmode,
                   rtx value, bool reverse, bool fallback_p)
{
  rtx op0 = str_rtx;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += subreg_memory_offset (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return true;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  machine_mode outermode = GET_MODE (op0);
  scalar_mode innermode = GET_MODE_INNER (outermode);
  if (VECTOR_MODE_P (outermode)
      && !MEM_P (op0)
      && optab_handler (vec_set_optab, outermode) != CODE_FOR_nothing
      && fieldmode == innermode
      && bitsize == GET_MODE_BITSIZE (innermode)
      && !(bitnum % GET_MODE_BITSIZE (innermode)))
    {
      struct expand_operand ops[3];
      enum insn_code icode = optab_handler (vec_set_optab, outermode);
      int pos = bitnum / GET_MODE_BITSIZE (innermode);

      create_fixed_operand (&ops[0], op0);
      create_input_operand (&ops[1], value, innermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
        return true;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.  */
  if (!MEM_P (op0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && ((bitsize == GET_MODE_BITSIZE (GET_MODE (op0)) && bitnum == 0)
          || (bitsize % BITS_PER_WORD == 0 && bitnum % BITS_PER_WORD == 0)))
    {
      /* Use the subreg machinery either to narrow OP0 to the required
         words or to cope with mode punning between equal-sized modes.
         In the latter case, use subreg on the rhs side, not lhs.  */
      rtx sub;

      if (bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
        {
          sub = simplify_gen_subreg (GET_MODE (op0), value, fieldmode, 0);
          if (sub)
            {
              if (reverse)
                sub = flip_storage_order (GET_MODE (op0), sub);
              emit_move_insn (op0, sub);
              return true;
            }
        }
      else
        {
          sub = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
                                     bitnum / BITS_PER_UNIT);
          if (sub)
            {
              if (reverse)
                value = flip_storage_order (fieldmode, value);
              emit_move_insn (sub, value);
              return true;
            }
        }
    }

  /* If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */
  if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode))
    {
      op0 = adjust_bitfield_address (op0, fieldmode, bitnum / BITS_PER_UNIT);
      if (reverse)
        value = flip_storage_order (fieldmode, value);
      emit_move_insn (op0, value);
      return true;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  opt_scalar_int_mode op0_mode = int_mode_for_mode (GET_MODE (op0));
  scalar_int_mode imode;
  if (!op0_mode.exists (&imode) || imode != GET_MODE (op0))
    {
      if (MEM_P (op0))
        op0 = adjust_bitfield_address_size (op0, op0_mode.else_blk (),
                                            0, MEM_SIZE (op0));
      else
        op0 = gen_lowpart (op0_mode.require (), op0);
    }

  /* Storing an lsb-aligned field in a register
     can be done with a movstrict instruction.  */

  if (!MEM_P (op0)
      && !reverse
      && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
      rtx arg0 = op0;
      unsigned HOST_WIDE_INT subreg_off;

      if (GET_CODE (arg0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          arg0 = SUBREG_REG (arg0);
        }

      subreg_off = bitnum / BITS_PER_UNIT;
      if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
        {
          arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);

          create_fixed_operand (&ops[0], arg0);
          /* Shrink the source operand to FIELDMODE.  */
          create_convert_operand_to (&ops[1], value, fieldmode, false);
          if (maybe_expand_insn (icode, 2, ops))
            return true;
        }
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      const bool backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx_insn *last;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force
         is not allowed.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_int_mode_for_size (nwords * BITS_PER_WORD);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards
                                  ? GET_MODE_SIZE (fieldmode) / UNITS_PER_WORD
                                  - i - 1
                                  : i);
          unsigned int bit_offset = (backwards ^ reverse
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx value_word = operand_subword_force (value, wordnum, fieldmode);
          unsigned HOST_WIDE_INT new_bitsize =
            MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD);

          /* If the remaining chunk doesn't have full wordsize we have
             to make sure that for big-endian machines the higher order
             bits are used.  */
          if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards)
            value_word = simplify_expand_binop (word_mode, lshr_optab,
                                                value_word,
                                                GEN_INT (BITS_PER_WORD
                                                         - new_bitsize),
                                                NULL_RTX, true, false);

          if (!store_bit_field_1 (op0, new_bitsize,
                                  bitnum + bit_offset,
                                  bitregion_start, bitregion_end,
                                  word_mode,
                                  value_word, reverse, fallback_p))
            {
              delete_insns_since (last);
              return false;
            }
        }
      return true;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */

  rtx orig_value = value;
  scalar_int_mode value_mode;
  if (GET_MODE (value) == VOIDmode)
    /* By this point we've dealt with values that are bigger than a word,
       so word_mode is a conservatively correct choice.  */
    value_mode = word_mode;
  else if (!is_a <scalar_int_mode> (GET_MODE (value), &value_mode))
    {
      value_mode = int_mode_for_mode (GET_MODE (value)).require ();
      value = gen_reg_rtx (value_mode);
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* If OP0 is a multi-word register, narrow it to the affected word.
     If the region spans two words, defer to store_split_bit_field.
     Don't do this if op0 is a single hard register wider than word
     such as a float or vector register.  */
  if (!MEM_P (op0)
      && GET_MODE_SIZE (op0_mode.require ()) > UNITS_PER_WORD
      && (!REG_P (op0)
          || !HARD_REGISTER_P (op0)
          || hard_regno_nregs (REGNO (op0), op0_mode.require ()) != 1))
    {
      if (bitnum % BITS_PER_WORD + bitsize > BITS_PER_WORD)
        {
          if (!fallback_p)
            return false;

          store_split_bit_field (op0, op0_mode, bitsize, bitnum,
                                 bitregion_start, bitregion_end,
                                 value, value_mode, reverse);
          return true;
        }
      op0 = simplify_gen_subreg (word_mode, op0, op0_mode.require (),
                                 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
      gcc_assert (op0);
      op0_mode = word_mode;
      bitnum %= BITS_PER_WORD;
    }

  /* From here on we can assume that the field to be stored in fits
     within a word.  If the destination is a register, it too fits
     in a word.  */

  extraction_insn insv;
  if (!MEM_P (op0)
      && !reverse
      && get_best_reg_extraction_insn (&insv, EP_insv,
                                       GET_MODE_BITSIZE (op0_mode.require ()),
                                       fieldmode)
      && store_bit_field_using_insv (&insv, op0, op0_mode,
                                     bitsize, bitnum, value, value_mode))
    return true;

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (MEM_P (op0) && !reverse)
    {
      if (get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
                                        fieldmode)
          && store_bit_field_using_insv (&insv, op0, op0_mode,
                                         bitsize, bitnum, value, value_mode))
        return true;

      rtx_insn *last = get_last_insn ();

      /* Try loading part of OP0 into a register, inserting the bitfield
         into that, and then copying the result back to OP0.  */
      unsigned HOST_WIDE_INT bitpos;
      rtx xop0 = adjust_bit_field_mem_for_reg (EP_insv, op0, bitsize, bitnum,
                                               bitregion_start, bitregion_end,
                                               fieldmode, &bitpos);
      if (xop0)
        {
          rtx tempreg = copy_to_reg (xop0);
          if (store_bit_field_1 (tempreg, bitsize, bitpos,
                                 bitregion_start, bitregion_end,
                                 fieldmode, orig_value, reverse, false))
            {
              emit_move_insn (xop0, tempreg);
              return true;
            }
          delete_insns_since (last);
        }
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, op0_mode, bitsize, bitnum, bitregion_start,
                         bitregion_end, value, value_mode, reverse);
  return true;
}
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.

   BITREGION_START is bitpos of the first bitfield in this region.
   BITREGION_END is the bitpos of the ending bitfield in this region.
   These two fields are 0, if the C++ memory model does not apply,
   or we are not interested in keeping track of bitfield regions.

   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.

   If REVERSE is true, the store is to be done in reverse order.  */

void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum,
                 unsigned HOST_WIDE_INT bitregion_start,
                 unsigned HOST_WIDE_INT bitregion_end,
                 machine_mode fieldmode,
                 rtx value, bool reverse)
{
  /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (fieldmode, &int_mode)
      && strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, int_mode,
                                     bitregion_start, bitregion_end))
    {
      /* Storing of a full word can be done with a simple store.
         We know here that the field can be accessed with one single
         instruction.  For targets that support unaligned memory,
         an unaligned access may be necessary.  */
      if (bitsize == GET_MODE_BITSIZE (int_mode))
        {
          str_rtx = adjust_bitfield_address (str_rtx, int_mode,
                                             bitnum / BITS_PER_UNIT);
          if (reverse)
            value = flip_storage_order (int_mode, value);
          gcc_assert (bitnum % BITS_PER_UNIT == 0);
          emit_move_insn (str_rtx, value);
        }
      else
        {
          rtx temp;

          str_rtx = narrow_bit_field_mem (str_rtx, int_mode, bitsize, bitnum,
                                          &bitnum);
          gcc_assert (bitnum + bitsize <= GET_MODE_BITSIZE (int_mode));
          temp = copy_to_reg (str_rtx);
          if (!store_bit_field_1 (temp, bitsize, bitnum, 0, 0,
                                  int_mode, value, reverse, true))
            gcc_unreachable ();

          emit_move_insn (str_rtx, temp);
        }

      return;
    }

  /* Under the C++0x memory model, we must not touch bits outside the
     bit region.  Adjust the address to start at the beginning of the
     bit region.  */
  if (MEM_P (str_rtx) && bitregion_start > 0)
    {
      scalar_int_mode best_mode;
      machine_mode addr_mode = VOIDmode;
      HOST_WIDE_INT offset, size;

      gcc_assert ((bitregion_start % BITS_PER_UNIT) == 0);

      offset = bitregion_start / BITS_PER_UNIT;
      bitnum -= bitregion_start;
      size = (bitnum + bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
      bitregion_end -= bitregion_start;
      bitregion_start = 0;
      if (get_best_mode (bitsize, bitnum,
                         bitregion_start, bitregion_end,
                         MEM_ALIGN (str_rtx), INT_MAX,
                         MEM_VOLATILE_P (str_rtx), &best_mode))
        addr_mode = best_mode;
      str_rtx = adjust_bitfield_address_size (str_rtx, addr_mode,
                                              offset, size);
    }

  if (!store_bit_field_1 (str_rtx, bitsize, bitnum,
                          bitregion_start, bitregion_end,
                          fieldmode, value, reverse, true))
    gcc_unreachable ();
}
/* Use shifts and boolean operations to store VALUE into a bit field of
   width BITSIZE in OP0, starting at bit BITNUM.  If OP0_MODE is defined,
   it is the mode of OP0, otherwise OP0 is a BLKmode MEM.  VALUE_MODE is
   the mode of VALUE.

   If REVERSE is true, the store is to be done in reverse order.  */

static void
store_fixed_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitnum,
                       unsigned HOST_WIDE_INT bitregion_start,
                       unsigned HOST_WIDE_INT bitregion_end,
                       rtx value, scalar_int_mode value_mode, bool reverse)
{
  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  scalar_int_mode best_mode;
  if (MEM_P (op0))
    {
      unsigned int max_bitsize = BITS_PER_WORD;
      scalar_int_mode imode;
      if (op0_mode.exists (&imode) && GET_MODE_BITSIZE (imode) < max_bitsize)
        max_bitsize = GET_MODE_BITSIZE (imode);

      if (!get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
                          MEM_ALIGN (op0), max_bitsize, MEM_VOLATILE_P (op0),
                          &best_mode))
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, op0_mode, bitsize, bitnum,
                                 bitregion_start, bitregion_end,
                                 value, value_mode, reverse);
          return;
        }

      op0 = narrow_bit_field_mem (op0, best_mode, bitsize, bitnum, &bitnum);
    }
  else
    best_mode = op0_mode.require ();

  store_fixed_bit_field_1 (op0, best_mode, bitsize, bitnum,
                           value, value_mode, reverse);
}
/* Helper function for store_fixed_bit_field, stores
   the bit field always using MODE, which is the mode of OP0.  The other
   arguments are as for store_fixed_bit_field.  */

static void
store_fixed_bit_field_1 (rtx op0, scalar_int_mode mode,
                         unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitnum,
                         rtx value, scalar_int_mode value_mode, bool reverse)
{
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
     for invalid input, such as f5 from gcc.dg/pr48335-2.c.  */

  if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
    /* BITNUM is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;

  /* Now BITNUM is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITNUM bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (CONST_INT_P (value))
    {
      unsigned HOST_WIDE_INT v = UINTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= (HOST_WIDE_INT_1U << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == (HOST_WIDE_INT_1U << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT
                   && v == HOST_WIDE_INT_M1U))
        all_one = 1;

      value = lshift_value (mode, v, bitnum);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (value_mode) != bitsize
                      && bitnum + bitsize != GET_MODE_BITSIZE (mode));

      if (value_mode != mode)
        value = convert_to_mode (mode, value, 1);

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitnum > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              bitnum, NULL_RTX, 1);
    }

  if (reverse)
    value = flip_storage_order (mode, value);

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      rtx mask = mask_rtx (mode, bitnum, bitsize, 1);
      if (reverse)
        mask = flip_storage_order (mode, mask);
      temp = expand_binop (mode, and_optab, temp, mask,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    {
      op0 = copy_rtx (op0);
      emit_move_insn (op0, temp);
    }
}
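/* In effect the sequence above performs the classic read-modify-write
   op0 = (op0 & ~(MASK << bitnum)) | ((value & MASK) << bitnum)
   with MASK = (1 << bitsize) - 1, skipping the AND when the field is
   all ones and the IOR when it is all zeros.  */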
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store, which has mode VALUE_MODE.
   If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is
   a BLKmode MEM.

   If REVERSE is true, the store is to be done in reverse order.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos,
                       unsigned HOST_WIDE_INT bitregion_start,
                       unsigned HOST_WIDE_INT bitregion_end,
                       rtx value, scalar_int_mode value_mode, bool reverse)
{
  unsigned int unit, total_bits, bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If OP0 is a memory with a mode, then UNIT must not be larger than
     OP0's mode as well.  Otherwise, store_fixed_bit_field will call us
     again, and we will mutually recurse forever.  */
  if (MEM_P (op0) && op0_mode.exists ())
    unit = MIN (unit, GET_MODE_BITSIZE (op0_mode.require ()));

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && !CONST_INT_P (value))
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode, force_reg (value_mode, value));
      value_mode = word_mode;
    }

  total_bits = GET_MODE_BITSIZE (value_mode);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;
      rtx part;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* When region of bytes we can touch is restricted, decrease
         UNIT close to the end of the region as needed.  If op0 is a REG
         or SUBREG of REG, don't do this, as there can't be data races
         on a register and we can expand shorter code in some cases.  */
      if (bitregion_end
          && unit > BITS_PER_UNIT
          && bitpos + bitsdone - thispos + unit > bitregion_end + 1
          && !REG_P (op0)
          && (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0))))
        {
          unit = unit / 2;
          continue;
        }

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
        {
          /* Fetch successively less significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & ((HOST_WIDE_INT_1 << thissize) - 1));
          /* Likewise, but the source is little-endian.  */
          else if (reverse)
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize,
                                            bitsize - bitsdone - thissize,
                                            NULL_RTX, 1, false);
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1, false);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & ((HOST_WIDE_INT_1 << thissize) - 1));
          /* Likewise, but the source is big-endian.  */
          else if (reverse)
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize,
                                            total_bits - bitsdone - thissize,
                                            NULL_RTX, 1, false);
          else
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize, bitsdone, NULL_RTX,
                                            1, false);
        }

      /* If OP0 is a register, then handle OFFSET here.  */
      rtx op0_piece = op0;
      opt_scalar_int_mode op0_piece_mode = op0_mode;
      if (SUBREG_P (op0) || REG_P (op0))
        {
          scalar_int_mode imode;
          if (op0_mode.exists (&imode)
              && GET_MODE_SIZE (imode) < UNITS_PER_WORD)
            {
              if (offset)
                op0_piece = const0_rtx;
            }
          else
            {
              op0_piece = operand_subword_force (op0,
                                                 offset * unit / BITS_PER_WORD,
                                                 GET_MODE (op0));
              op0_piece_mode = word_mode;
            }
          offset &= BITS_PER_WORD / unit - 1;
        }

      /* OFFSET is in UNITs, and UNIT is in bits.  If WORD is const0_rtx,
         it is just an out-of-bounds access.  Ignore it.  */
      if (op0_piece != const0_rtx)
        store_fixed_bit_field (op0_piece, op0_piece_mode, thissize,
                               offset * unit + thispos, bitregion_start,
                               bitregion_end, part, word_mode, reverse);
      bitsdone += thissize;
    }
}
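/* For example, with 32-bit units a 20-bit field at BITPOS 28 is stored
   in two pieces: the first iteration writes thissize = 4 bits at
   thispos 28 of unit 0, the second the remaining 16 bits at thispos 0
   of unit 1.  */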
/* A subroutine of extract_bit_field_1 that converts return value X
   to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
   to extract_bit_field.  */

static rtx
convert_extracted_bit_field (rtx x, machine_mode mode,
                             machine_mode tmode, bool unsignedp)
{
  if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
    return x;

  /* If the x mode is not a scalar integral, first convert to the
     integer mode of that size and then access it as a floating-point
     value via a SUBREG.  */
  if (!SCALAR_INT_MODE_P (tmode))
    {
      scalar_int_mode int_mode = int_mode_for_mode (tmode).require ();
      x = convert_to_mode (int_mode, x, unsignedp);
      x = force_reg (int_mode, x);
      return gen_lowpart (tmode, x);
    }

  return convert_to_mode (tmode, x, unsignedp);
}
/* Try to use an ext(z)v pattern to extract a field from OP0.
   Return the extracted value on success, otherwise return null.
   EXTV describes the extraction instruction to use.  If OP0_MODE
   is defined, it is the mode of OP0, otherwise OP0 is a BLKmode MEM.
   The other arguments are as for extract_bit_field.  */

static rtx
extract_bit_field_using_extv (const extraction_insn *extv, rtx op0,
                              opt_scalar_int_mode op0_mode,
                              unsigned HOST_WIDE_INT bitsize,
                              unsigned HOST_WIDE_INT bitnum,
                              int unsignedp, rtx target,
                              machine_mode mode, machine_mode tmode)
{
  struct expand_operand ops[4];
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  scalar_int_mode ext_mode = extv->field_mode;
  unsigned unit = GET_MODE_BITSIZE (ext_mode);

  if (bitsize == 0 || unit < bitsize)
    return NULL_RTX;

  if (MEM_P (op0))
    /* Get a reference to the first byte of the field.  */
    op0 = narrow_bit_field_mem (op0, extv->struct_mode, bitsize, bitnum,
                                &bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in EXT_MODE.  */
      if (BYTES_BIG_ENDIAN)
        bitnum += unit - GET_MODE_BITSIZE (op0_mode.require ());

      /* If op0 is a register, we need it in EXT_MODE to make it
         acceptable to the format of ext(z)v.  */
      if (GET_CODE (op0) == SUBREG && op0_mode.require () != ext_mode)
        return NULL_RTX;
      if (REG_P (op0) && op0_mode.require () != ext_mode)
        op0 = gen_lowpart_SUBREG (ext_mode, op0);
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are extracting from.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;

  if (target == 0)
    target = spec_target = gen_reg_rtx (tmode);

  if (GET_MODE (target) != ext_mode)
    {
      /* Don't use LHS paradoxical subreg if explicit truncation is needed
         between the mode of the extraction (word_mode) and the target
         mode.  Instead, create a temporary and use convert_move to set
         the target.  */
      if (REG_P (target)
          && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (target), ext_mode))
        {
          target = gen_lowpart (ext_mode, target);
          if (partial_subreg_p (GET_MODE (spec_target), ext_mode))
            spec_target_subreg = target;
        }
      else
        target = gen_reg_rtx (ext_mode);
    }

  create_output_operand (&ops[0], target, ext_mode);
  create_fixed_operand (&ops[1], op0);
  create_integer_operand (&ops[2], bitsize);
  create_integer_operand (&ops[3], bitnum);
  if (maybe_expand_insn (extv->icode, 4, ops))
    {
      target = ops[0].value;
      if (target == spec_target)
        return target;
      if (target == spec_target_subreg)
        return spec_target;
      return convert_extracted_bit_field (target, mode, tmode, unsignedp);
    }
  return NULL_RTX;
}
/* A subroutine of extract_bit_field, with the same arguments.
   If FALLBACK_P is true, fall back to extract_fixed_bit_field
   if we can find no other means of implementing the operation.
   if FALLBACK_P is false, return NULL instead.  */

static rtx
extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                     unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                     machine_mode mode, machine_mode tmode,
                     bool reverse, bool fallback_p, rtx *alt_rtl)
{
  rtx op0 = str_rtx;

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      if (reverse)
        op0 = flip_storage_order (mode, op0);
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* First try to check for vector from vector extractions.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && VECTOR_MODE_P (tmode)
      && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (tmode))
    {
      machine_mode new_mode = GET_MODE (op0);
      if (GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode))
        {
          scalar_mode inner_mode = GET_MODE_INNER (tmode);
          unsigned int nunits = (GET_MODE_BITSIZE (GET_MODE (op0))
                                 / GET_MODE_UNIT_BITSIZE (tmode));
          if (!mode_for_vector (inner_mode, nunits).exists (&new_mode)
              || !VECTOR_MODE_P (new_mode)
              || GET_MODE_SIZE (new_mode) != GET_MODE_SIZE (GET_MODE (op0))
              || GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode)
              || !targetm.vector_mode_supported_p (new_mode))
            new_mode = VOIDmode;
        }
      if (new_mode != VOIDmode
          && (convert_optab_handler (vec_extract_optab, new_mode, tmode)
              != CODE_FOR_nothing)
          && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (tmode)
              == bitnum / GET_MODE_BITSIZE (tmode)))
        {
          struct expand_operand ops[3];
          machine_mode outermode = new_mode;
          machine_mode innermode = tmode;
          enum insn_code icode
            = convert_optab_handler (vec_extract_optab, outermode, innermode);
          unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);

          if (new_mode != GET_MODE (op0))
            op0 = gen_lowpart (new_mode, op0);
          create_output_operand (&ops[0], target, innermode);
          ops[0].target = 1;
          create_input_operand (&ops[1], op0, outermode);
          create_integer_operand (&ops[2], pos);
          if (maybe_expand_insn (icode, 3, ops))
            {
              if (alt_rtl && ops[0].target)
                *alt_rtl = target;
              target = ops[0].value;
              if (GET_MODE (target) != mode)
                return gen_lowpart (tmode, target);
              return target;
            }
        }
    }

  /* See if we can get a better vector mode before extracting.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && GET_MODE_INNER (GET_MODE (op0)) != tmode)
    {
      machine_mode new_mode;

      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        new_mode = MIN_MODE_VECTOR_FLOAT;
      else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
        new_mode = MIN_MODE_VECTOR_FRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
        new_mode = MIN_MODE_VECTOR_UFRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
        new_mode = MIN_MODE_VECTOR_ACCUM;
      else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
        new_mode = MIN_MODE_VECTOR_UACCUM;
      else
        new_mode = MIN_MODE_VECTOR_INT;

      FOR_EACH_MODE_FROM (new_mode, new_mode)
        if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
            && GET_MODE_UNIT_SIZE (new_mode) == GET_MODE_SIZE (tmode)
            && targetm.vector_mode_supported_p (new_mode))
          break;
      if (new_mode != VOIDmode)
        op0 = gen_lowpart (new_mode, op0);
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  machine_mode outermode = GET_MODE (op0);
  scalar_mode innermode = GET_MODE_INNER (outermode);
  if (VECTOR_MODE_P (outermode)
      && !MEM_P (op0)
      && (convert_optab_handler (vec_extract_optab, outermode, innermode)
          != CODE_FOR_nothing)
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (innermode)
          == bitnum / GET_MODE_BITSIZE (innermode)))
    {
      struct expand_operand ops[3];
      enum insn_code icode
        = convert_optab_handler (vec_extract_optab, outermode, innermode);
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);

      create_output_operand (&ops[0], target, innermode);
      ops[0].target = 1;
      create_input_operand (&ops[1], op0, outermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
        {
          if (alt_rtl && ops[0].target)
            *alt_rtl = target;
          target = ops[0].value;
          if (GET_MODE (target) != mode)
            return gen_lowpart (tmode, target);
          return target;
        }
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  opt_scalar_int_mode op0_mode = int_mode_for_mode (GET_MODE (op0));
  scalar_int_mode imode;
  if (!op0_mode.exists (&imode) || imode != GET_MODE (op0))
    {
      if (MEM_P (op0))
        op0 = adjust_bitfield_address_size (op0, op0_mode.else_blk (),
                                            0, MEM_SIZE (op0));
      else if (op0_mode.exists (&imode))
        {
          op0 = gen_lowpart (imode, op0);

          /* If we got a SUBREG, force it into a register since we
             aren't going to be able to do another SUBREG on it.  */
          if (GET_CODE (op0) == SUBREG)
            op0 = force_reg (imode, op0);
        }
      else
        {
          HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0));
          rtx mem = assign_stack_temp (GET_MODE (op0), size);
          emit_move_insn (mem, op0);
          op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
        }
    }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Get the mode of the field to use for atomic access or subreg
     conversion.  */
  machine_mode mode1;
  if (!SCALAR_INT_MODE_P (tmode)
      || !mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0).exists (&mode1))
    mode1 = mode;
  gcc_assert (mode1 != BLKmode);

  /* Extraction of a full MODE1 value can be done with a subreg as long
     as the least significant bit of the value is the least significant
     bit of either OP0 or a word of OP0.  */
  if (!MEM_P (op0)
      && !reverse
      && lowpart_bit_field_p (bitnum, bitsize, op0_mode.require ())
      && bitsize == GET_MODE_BITSIZE (mode1)
      && TRULY_NOOP_TRUNCATION_MODES_P (mode1, op0_mode.require ()))
    {
      rtx sub = simplify_gen_subreg (mode1, op0, op0_mode.require (),
                                     bitnum / BITS_PER_UNIT);
      if (sub)
        return convert_extracted_bit_field (sub, mode, tmode, unsignedp);
    }

  /* Extraction of a full MODE1 value can be done with a load as long as
     the field is on a byte boundary and is sufficiently aligned.  */
  if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1))
    {
      op0 = adjust_bitfield_address (op0, mode1, bitnum / BITS_PER_UNIT);
      if (reverse)
        op0 = flip_storage_order (mode1, op0);
      return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      const bool backwards = WORDS_BIG_ENDIAN;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx_insn *last;

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      /* In case we're about to clobber a base register or something
         (see gcc.c-torture/execute/20040625-1.c).  */
      if (reg_mentioned_p (target, str_rtx))
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_clobber (target);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (backwards
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (backwards ^ reverse
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field_1 (op0, MIN (BITS_PER_WORD,
                                             bitsize - i * BITS_PER_WORD),
                                   bitnum + bit_offset, 1, target_part,
                                   mode, word_mode, reverse, fallback_p, NULL);

          gcc_assert (target_part);
          if (!result_part)
            {
              delete_insns_since (last);
              return NULL;
            }

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    backwards ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
    }
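  /* E.g. for a 40-bit signed field extracted into a 64-bit MODE, the two
     shifts above move the field's sign bit up to the msb and then
     arithmetic-shift it back down, replicating the sign through the
     upper 24 bits of the result.  */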
  /* If OP0 is a multi-word register, narrow it to the affected word.
     If the region spans two words, defer to extract_split_bit_field.  */
  if (!MEM_P (op0) && GET_MODE_SIZE (op0_mode.require ()) > UNITS_PER_WORD)
    {
      if (bitnum % BITS_PER_WORD + bitsize > BITS_PER_WORD)
        {
          if (!fallback_p)
            return NULL_RTX;
          target = extract_split_bit_field (op0, op0_mode, bitsize, bitnum,
                                            unsignedp, reverse);
          return convert_extracted_bit_field (target, mode, tmode, unsignedp);
        }
      op0 = simplify_gen_subreg (word_mode, op0, op0_mode.require (),
                                 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
      op0_mode = word_mode;
      bitnum %= BITS_PER_WORD;
    }
  /* From here on we know the desired field is smaller than a word.
     If OP0 is a register, it too fits within a word.  */
  enum extraction_pattern pattern = unsignedp ? EP_extzv : EP_extv;
  extraction_insn extv;
  if (!MEM_P (op0)
      && !reverse
      /* ??? We could limit the structure size to the part of OP0 that
         contains the field, with appropriate checks for endianness
         and TARGET_TRULY_NOOP_TRUNCATION.  */
      && get_best_reg_extraction_insn (&extv, pattern,
                                       GET_MODE_BITSIZE (op0_mode.require ()),
                                       tmode))
    {
      rtx result = extract_bit_field_using_extv (&extv, op0, op0_mode,
                                                 bitsize, bitnum,
                                                 unsignedp, target, mode,
                                                 tmode);
      if (result)
        return result;
    }
  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (MEM_P (op0) && !reverse)
    {
      if (get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
                                        tmode))
        {
          rtx result = extract_bit_field_using_extv (&extv, op0, op0_mode,
                                                     bitsize, bitnum,
                                                     unsignedp, target, mode,
                                                     tmode);
          if (result)
            return result;
        }

      rtx_insn *last = get_last_insn ();

      /* Try loading part of OP0 into a register and extracting the
         bitfield from that.  */
      unsigned HOST_WIDE_INT bitpos;
      rtx xop0 = adjust_bit_field_mem_for_reg (pattern, op0, bitsize, bitnum,
                                               0, 0, tmode, &bitpos);
      if (xop0)
        {
          xop0 = copy_to_reg (xop0);
          rtx result = extract_bit_field_1 (xop0, bitsize, bitpos,
                                            unsignedp, target,
                                            mode, tmode, reverse, false, NULL);
          if (result)
            return result;
          delete_insns_since (last);
        }
    }
  if (!fallback_p)
    return NULL;

  /* Find a correspondingly-sized integer field, so we can apply
     shifts and masks to it.  */
  scalar_int_mode int_mode;
  if (!int_mode_for_mode (tmode).exists (&int_mode))
    /* If this fails, we should probably push op0 out to memory and then
       do a load.  */
    int_mode = int_mode_for_mode (mode).require ();

  target = extract_fixed_bit_field (int_mode, op0, op0_mode, bitsize,
                                    bitnum, target, unsignedp, reverse);

  /* Complex values must be reversed piecewise, so we need to undo the global
     reversal, convert to the complex mode and reverse again.  */
  if (reverse && COMPLEX_MODE_P (tmode))
    {
      target = flip_storage_order (int_mode, target);
      target = convert_extracted_bit_field (target, mode, tmode, unsignedp);
      target = flip_storage_order (tmode, target);
    }
  else
    target = convert_extracted_bit_field (target, mode, tmode, unsignedp);

  return target;
}
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   If REVERSE is true, the extraction is to be done in reverse order.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                   machine_mode mode, machine_mode tmode, bool reverse,
                   rtx *alt_rtl)
{
  machine_mode mode1;

  /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
  if (GET_MODE_BITSIZE (GET_MODE (str_rtx)) > 0)
    mode1 = GET_MODE (str_rtx);
  else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
    mode1 = GET_MODE (target);
  else
    mode1 = tmode;

  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode1, &int_mode)
      && strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, int_mode, 0, 0))
    {
      /* Extraction of a full INT_MODE value can be done with a simple load.
         We know here that the field can be accessed with one single
         instruction.  For targets that support unaligned memory,
         an unaligned access may be necessary.  */
      if (bitsize == GET_MODE_BITSIZE (int_mode))
        {
          rtx result = adjust_bitfield_address (str_rtx, int_mode,
                                                bitnum / BITS_PER_UNIT);
          if (reverse)
            result = flip_storage_order (int_mode, result);
          gcc_assert (bitnum % BITS_PER_UNIT == 0);
          return convert_extracted_bit_field (result, mode, tmode, unsignedp);
        }

      str_rtx = narrow_bit_field_mem (str_rtx, int_mode, bitsize, bitnum,
                                      &bitnum);
      gcc_assert (bitnum + bitsize <= GET_MODE_BITSIZE (int_mode));
      str_rtx = copy_to_reg (str_rtx);
    }

  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
                              target, mode, tmode, reverse, true, alt_rtl);
}
/* Use shifts and boolean operations to extract a field of BITSIZE bits
   from bit BITNUM of OP0.  If OP0_MODE is defined, it is the mode of OP0,
   otherwise OP0 is a BLKmode MEM.

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If REVERSE is true, the extraction is to be done in reverse order.

   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (machine_mode tmode, rtx op0,
                         opt_scalar_int_mode op0_mode,
                         unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitnum, rtx target,
                         int unsignedp, bool reverse)
{
  scalar_int_mode mode;
  if (MEM_P (op0))
    {
      if (!get_best_mode (bitsize, bitnum, 0, 0, MEM_ALIGN (op0),
                          BITS_PER_WORD, MEM_VOLATILE_P (op0), &mode))
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, op0_mode, bitsize, bitnum,
                                        unsignedp, reverse);

      op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
    }
  else
    mode = op0_mode.require ();

  return extract_fixed_bit_field_1 (tmode, op0, mode, bitsize, bitnum,
                                    target, unsignedp, reverse);
}
/* Helper function for extract_fixed_bit_field, extracts
   the bit field always using MODE, which is the mode of OP0.
   The other arguments are as for extract_fixed_bit_field.  */

static rtx
extract_fixed_bit_field_1 (machine_mode tmode, rtx op0, scalar_int_mode mode,
                           unsigned HOST_WIDE_INT bitsize,
                           unsigned HOST_WIDE_INT bitnum, rtx target,
                           int unsignedp, bool reverse)
{
  /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
     for invalid input, such as extract equivalent of f5 from
     gcc.dg/pr48335-2.c.  */

  if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
    /* BITNUM is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;

  /* Now BITNUM is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */
  if (reverse)
    op0 = flip_storage_order (mode, op0);

  if (unsignedp)
    {
      if (bitnum)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          /* Maybe propagate the target for the shift.  */
          rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
          if (tmode != mode)
            subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitnum, subtarget, 1);
        }

      /* Convert the value to the desired mode.  TMODE must also be a
         scalar integer for this conversion to make sense, since we
         shouldn't reinterpret the bits.  */
      scalar_int_mode new_mode = as_a <scalar_int_mode> (tmode);
      if (mode != new_mode)
        op0 = convert_to_mode (new_mode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitnum + bitsize)
        return expand_binop (new_mode, and_optab, op0,
                             mask_rtx (new_mode, 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);

  /* Find the narrowest integer mode that contains the field.  */

  opt_scalar_int_mode mode_iter;
  FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
    if (GET_MODE_BITSIZE (mode_iter.require ()) >= bitsize + bitnum)
      break;

  mode = mode_iter.require ();
  op0 = convert_to_mode (mode, op0, 0);

  if (mode != tmode)
    target = 0;

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitnum))
    {
      int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitnum);
      /* Maybe propagate the target for the shift.  */
      rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       GET_MODE_BITSIZE (mode) - bitsize, target, 0);
}
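
/* Illustrative example (not in the original sources): with the routine
   above, extracting a signed 5-bit field that starts at bit 3 of a
   32-bit register reduces to

     ((int) (x << 24)) >> 27

   i.e. a left shift of 32 - (5 + 3) that brings the field's msb to the
   register's msb, followed by an arithmetic right shift of 32 - 5 that
   sign-extends the field.  */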
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE << BITPOS.  */

static rtx
lshift_value (machine_mode mode, unsigned HOST_WIDE_INT value,
              int bitpos)
{
  return immed_wide_int_const (wi::lshift (value, bitpos), mode);
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
   If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is
   a BLKmode MEM.

   If REVERSE is true, the extraction is to be done in reverse order.  */
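/* Illustrative example (not in the original sources): with 32-bit units,
   a 20-bit field starting at bit 26 is read as 6 bits from the first
   word and 14 bits from the second; on a little-endian target the
   pieces are recombined as

     result = (part1 << 6) | part0

   each part having been extracted as an unsigned bit field.  */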
static rtx
extract_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
                         unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitpos, int unsignedp,
                         bool reverse)
{
  unsigned int unit;
  unsigned int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         extract_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.  */
      rtx op0_piece = op0;
      opt_scalar_int_mode op0_piece_mode = op0_mode;
      if (SUBREG_P (op0) || REG_P (op0))
        {
          op0_piece = operand_subword_force (op0, offset, op0_mode.require ());
          op0_piece_mode = word_mode;
          offset = 0;
        }

      /* Extract the parts in bit-counting order,
         whose meaning is determined by BYTES_PER_UNIT.
         OFFSET is in UNITs, and UNIT is in bits.  */
      part = extract_fixed_bit_field (word_mode, op0_piece, op0_piece_mode,
                                      thissize, offset * unit + thispos,
                                      0, 1, reverse);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
        {
          if (bitsize != bitsdone)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 bitsize - bitsdone, 0, 1);
        }
      else
        {
          if (bitsdone != thissize)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 bitsdone - thissize, 0, 1);
        }

      if (first)
        result = part;
      else
        /* Combine the parts with bitwise or.  This works
           because we extracted each part as an unsigned bit field.  */
        result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
                               OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
                         BITS_PER_WORD - bitsize, NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
                       BITS_PER_WORD - bitsize, NULL_RTX, 0);
}
/* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
   the bit pattern.  SRC_MODE is the mode of SRC; if this is smaller than
   MODE, fill the upper bits with zeros.  Fail if the layout of either
   mode is unknown (as for CC modes) or if the extraction would involve
   unprofitable mode punning.  Return the value on success, otherwise
   return null.

   This is different from gen_lowpart* in these respects:

     - the returned value must always be considered an rvalue

     - when MODE is wider than SRC_MODE, the extraction involves
       a zero extension

     - when MODE is smaller than SRC_MODE, the extraction involves
       a truncation (and is thus subject to TARGET_TRULY_NOOP_TRUNCATION).

   In other words, this routine performs a computation, whereas the
   gen_lowpart* routines are conceptually lvalue or rvalue subreg
   operations.  */

rtx
extract_low_bits (machine_mode mode, machine_mode src_mode, rtx src)
{
  scalar_int_mode int_mode, src_int_mode;

  if (mode == src_mode)
    return src;

  if (CONSTANT_P (src))
    {
      /* simplify_gen_subreg can't be used here, as if simplify_subreg
         fails, it will happily create (subreg (symbol_ref)) or similar
         invalid SUBREGs.  */
      unsigned int byte = subreg_lowpart_offset (mode, src_mode);
      rtx ret = simplify_subreg (mode, src, src_mode, byte);
      if (ret)
        return ret;

      if (GET_MODE (src) == VOIDmode
          || !validate_subreg (mode, src_mode, src, byte))
        return NULL_RTX;

      src = force_reg (GET_MODE (src), src);
      return gen_rtx_SUBREG (mode, src, byte);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
    return NULL_RTX;

  if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
      && targetm.modes_tieable_p (mode, src_mode))
    {
      rtx x = gen_lowpart_common (mode, src);
      if (x)
        return x;
    }

  if (!int_mode_for_mode (src_mode).exists (&src_int_mode)
      || !int_mode_for_mode (mode).exists (&int_mode))
    return NULL_RTX;

  if (!targetm.modes_tieable_p (src_int_mode, src_mode))
    return NULL_RTX;
  if (!targetm.modes_tieable_p (int_mode, mode))
    return NULL_RTX;

  src = gen_lowpart (src_int_mode, src);
  src = convert_modes (int_mode, src_int_mode, src, true);
  src = gen_lowpart (mode, src);
  return src;
}
/* Add INC into TARGET.  */

void
expand_inc (rtx target, rtx inc)
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
                            target, inc,
                            target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (rtx target, rtx dec)
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
                            target, dec,
                            target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the rtx for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.
   If that cannot be done, abort the compilation unless MAY_FAIL is true,
   in which case 0 is returned.  */

static rtx
expand_shift_1 (enum tree_code code, machine_mode mode, rtx shifted,
                rtx amount, rtx target, int unsignedp, bool may_fail = false)
{
  rtx op1, temp = 0;
  int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  optab lshift_optab = ashl_optab;
  optab rshift_arith_optab = ashr_optab;
  optab rshift_uns_optab = lshr_optab;
  optab lrotate_optab = rotl_optab;
  optab rrotate_optab = rotr_optab;
  machine_mode op1_mode;
  scalar_mode scalar_mode = GET_MODE_INNER (mode);
  int attempt;
  bool speed = optimize_insn_for_speed_p ();

  op1 = amount;
  op1_mode = GET_MODE (op1);

  /* Determine whether the shift/rotate amount is a vector, or scalar.  If the
     shift amount is a vector, use the vector/vector shift patterns.  */
  if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
    {
      lshift_optab = vashl_optab;
      rshift_arith_optab = vashr_optab;
      rshift_uns_optab = vlshr_optab;
      lrotate_optab = vrotl_optab;
      rrotate_optab = vrotr_optab;
    }

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  if (SHIFT_COUNT_TRUNCATED)
    {
      if (CONST_INT_P (op1)
          && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
              (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (scalar_mode)))
        op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
                       % GET_MODE_BITSIZE (scalar_mode));
      else if (GET_CODE (op1) == SUBREG
               && subreg_lowpart_p (op1)
               && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op1)))
               && SCALAR_INT_MODE_P (GET_MODE (op1)))
        op1 = SUBREG_REG (op1);
    }

  /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
     prefer left rotation, if op1 is from bitsize / 2 + 1 to
     bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
     amount instead.  */
  if (rotate
      && CONST_INT_P (op1)
      && IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (scalar_mode) / 2 + left,
                   GET_MODE_BITSIZE (scalar_mode) - 1))
    {
      op1 = GEN_INT (GET_MODE_BITSIZE (scalar_mode) - INTVAL (op1));
      left = !left;
      code = left ? LROTATE_EXPR : RROTATE_EXPR;
    }

  /* Rotation of 16bit values by 8 bits is effectively equivalent to a bswaphi.
     Note that this is not the case for bigger values.  For instance a rotation
     of 0x01020304 by 16 bits gives 0x03040102 which is different from
     0x04030201 (bswapsi).  */
  if (rotate
      && CONST_INT_P (op1)
      && INTVAL (op1) == BITS_PER_UNIT
      && GET_MODE_SIZE (scalar_mode) == 2
      && optab_handler (bswap_optab, HImode) != CODE_FOR_nothing)
    return expand_unop (HImode, bswap_optab, shifted, NULL_RTX, unsignedp);

  if (op1 == const0_rtx)
    return shifted;

  /* Check whether it's cheaper to implement a left shift by a constant
     bit count by a sequence of additions.  */
  if (code == LSHIFT_EXPR
      && CONST_INT_P (op1)
      && INTVAL (op1) > 0
      && INTVAL (op1) < GET_MODE_PRECISION (scalar_mode)
      && INTVAL (op1) < MAX_BITS_PER_WORD
      && (shift_cost (speed, mode, INTVAL (op1))
          > INTVAL (op1) * add_cost (speed, mode))
      && shift_cost (speed, mode, INTVAL (op1)) != MAX_COST)
    {
      int i;
      for (i = 0; i < INTVAL (op1); i++)
        {
          temp = force_reg (mode, shifted);
          shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
                                  unsignedp, OPTAB_LIB_WIDEN);
        }
      return shifted;
    }

  for (attempt = 0; temp == 0 && attempt < 3; attempt++)
    {
      enum optab_methods methods;

      if (attempt == 0)
        methods = OPTAB_DIRECT;
      else if (attempt == 1)
        methods = OPTAB_WIDEN;
      else
        methods = OPTAB_LIB_WIDEN;

      if (rotate)
        {
          /* Widening does not work for rotation.  */
          if (methods == OPTAB_WIDEN)
            continue;
          else if (methods == OPTAB_LIB_WIDEN)
            {
              /* If we have been unable to open-code this by a rotation,
                 do it as the IOR of two shifts.  I.e., to rotate A
                 by N bits, compute
                 (A << N) | ((unsigned) A >> ((-N) & (C - 1)))
                 where C is the bitsize of A.

                 It is theoretically possible that the target machine might
                 not be able to perform either shift and hence we would
                 be making two libcalls rather than just the one for the
                 shift (similarly if IOR could not be done).  We will allow
                 this extremely unlikely lossage to avoid complicating the
                 code below.  */
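
              /* Illustrative example (not in the original sources):
                 for C == 32 and a constant N == 13 this becomes
                 (A << 13) | ((unsigned) A >> 19); the (-N) & (C - 1)
                 form of the second shift count also yields the correct
                 count of 0 for a variable N == 0.  */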
              rtx subtarget = target == shifted ? 0 : target;
              rtx new_amount, other_amount;
              rtx temp1;

              new_amount = op1;
              if (op1 == const0_rtx)
                other_amount = const0_rtx;
              else if (CONST_INT_P (op1))
                other_amount = GEN_INT (GET_MODE_BITSIZE (scalar_mode)
                                        - INTVAL (op1));
              else
                {
                  other_amount
                    = simplify_gen_unary (NEG, GET_MODE (op1),
                                          op1, GET_MODE (op1));
                  HOST_WIDE_INT mask = GET_MODE_PRECISION (scalar_mode) - 1;
                  other_amount
                    = simplify_gen_binary (AND, GET_MODE (op1), other_amount,
                                           gen_int_mode (mask, GET_MODE (op1)));
                }

              shifted = force_reg (mode, shifted);

              temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
                                     mode, shifted, new_amount, 0, 1);
              temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
                                      mode, shifted, other_amount,
                                      subtarget, 1);
              return expand_binop (mode, ior_optab, temp, temp1, target,
                                   unsignedp, methods);
            }

          temp = expand_binop (mode,
                               left ? lrotate_optab : rrotate_optab,
                               shifted, op1, target, unsignedp, methods);
        }
      else if (unsignedp)
        temp = expand_binop (mode,
                             left ? lshift_optab : rshift_uns_optab,
                             shifted, op1, target, unsignedp, methods);

      /* Do arithmetic shifts.
         Also, if we are going to widen the operand, we can just as well
         use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
          && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
        {
          enum optab_methods methods1 = methods;

          /* If trying to widen a log shift to an arithmetic shift,
             don't accept an arithmetic shift of the same size.  */
          if (unsignedp)
            methods1 = OPTAB_MUST_WIDEN;

          /* Arithmetic shift */

          temp = expand_binop (mode,
                               left ? lshift_optab : rshift_arith_optab,
                               shifted, op1, target, unsignedp, methods1);
        }

      /* We used to try extzv here for logical right shifts, but that was
         only useful for one machine, the VAX, and caused poor code
         generation there for lshrdi3, so the code was deleted and a
         define_expand for lshrsi3 was added to vax.md.  */
    }

  gcc_assert (temp != NULL_RTX || may_fail);
  return temp;
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
              int amount, rtx target, int unsignedp)
{
  return expand_shift_1 (code, mode,
                         shifted, GEN_INT (amount), target, unsignedp);
}

/* Likewise, but return 0 if that cannot be done.  */

rtx
maybe_expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
                    int amount, rtx target, int unsignedp)
{
  return expand_shift_1 (code, mode,
                         shifted, GEN_INT (amount), target, unsignedp, true);
}

/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_variable_shift (enum tree_code code, machine_mode mode, rtx shifted,
                       tree amount, rtx target, int unsignedp)
{
  return expand_shift_1 (code, mode,
                         shifted, expand_normal (amount), target, unsignedp);
}
static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
                        const struct mult_cost *, machine_mode mode);
static rtx expand_mult_const (machine_mode, rtx, HOST_WIDE_INT, rtx,
                              const struct algorithm *, enum mult_variant);
static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
static rtx extract_high_half (scalar_int_mode, rtx);
static rtx expmed_mult_highpart (scalar_int_mode, rtx, rtx, rtx, int, int);
static rtx expmed_mult_highpart_optab (scalar_int_mode, rtx, rtx, rtx,
                                       int, int);
/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.
   MODE is the machine mode of the multiplication.  */
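/* Illustrative example (not in the original sources): for T = 10 one
   possible result is the two-operation sequence

     y = (x << 2) + x;  -- alg_add_t2_m, multiply by 5
     y = y << 1;        -- alg_shift, multiply by 2

   which is preferred whenever its cost, computed from add_cost and
   shift_cost, stays below COST_LIMIT.  */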
static void
synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
            const struct mult_cost *cost_limit, machine_mode mode)
{
  int m;
  struct algorithm *alg_in, *best_alg;
  struct mult_cost best_cost;
  struct mult_cost new_limit;
  int op_cost, op_latency;
  unsigned HOST_WIDE_INT orig_t = t;
  unsigned HOST_WIDE_INT q;
  int maxm, hash_index;
  bool cache_hit = false;
  enum alg_code cache_alg = alg_zero;
  bool speed = optimize_insn_for_speed_p ();
  scalar_int_mode imode;
  struct alg_hash_entry *entry_ptr;

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost.cost = cost_limit->cost + 1;
  alg_out->cost.latency = cost_limit->latency + 1;

  if (cost_limit->cost < 0
      || (cost_limit->cost == 0 && cost_limit->latency <= 0))
    return;

  /* Be prepared for vector modes.  */
  imode = as_a <scalar_int_mode> (GET_MODE_INNER (mode));

  maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (imode));

  /* Restrict the bits of "t" to the multiplication's mode.  */
  t &= GET_MODE_MASK (imode);

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost.cost = 0;
      alg_out->cost.latency = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (MULT_COST_LESS (cost_limit, zero_cost (speed)))
        return;
      else
        {
          alg_out->ops = 1;
          alg_out->cost.cost = zero_cost (speed);
          alg_out->cost.latency = zero_cost (speed);
          alg_out->op[0] = alg_zero;
          return;
        }
    }

  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = XALLOCA (struct algorithm);
  best_alg = XALLOCA (struct algorithm);
  best_cost = *cost_limit;

  /* Compute the hash index.  */
  hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;

  /* See if we already know what to do for T.  */
  entry_ptr = alg_hash_entry_ptr (hash_index);
  if (entry_ptr->t == t
      && entry_ptr->mode == mode
      && entry_ptr->speed == speed
      && entry_ptr->alg != alg_unknown)
    {
      cache_alg = entry_ptr->alg;

      if (cache_alg == alg_impossible)
        {
          /* The cache tells us that it's impossible to synthesize
             multiplication by T within entry_ptr->cost.  */
          if (!CHEAPER_MULT_COST (&entry_ptr->cost, cost_limit))
            /* COST_LIMIT is at least as restrictive as the one
               recorded in the hash table, in which case we have no
               hope of synthesizing a multiplication.  Just
               return.  */
            return;

          /* If we get here, COST_LIMIT is less restrictive than the
             one recorded in the hash table, so we may be able to
             synthesize a multiplication.  Proceed as if we didn't
             have the cache entry.  */
        }
      else
        {
          if (CHEAPER_MULT_COST (cost_limit, &entry_ptr->cost))
            /* The cached algorithm shows that this multiplication
               requires more cost than COST_LIMIT.  Just return.  This
               way, we don't clobber this cache entry with
               alg_impossible but retain useful information.  */
            return;

          cache_hit = true;

          switch (cache_alg)
            {
            case alg_shift:
              goto do_alg_shift;

            case alg_add_t_m2:
            case alg_sub_t_m2:
              goto do_alg_addsub_t_m2;

            case alg_add_factor:
            case alg_sub_factor:
              goto do_alg_addsub_factor;

            case alg_add_t2_m:
              goto do_alg_add_t2_m;

            case alg_sub_t2_m:
              goto do_alg_sub_t2_m;

            default:
              gcc_unreachable ();
            }
        }
    }
  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */

  if ((t & 1) == 0)
    {
    do_alg_shift:
      m = ctz_or_zero (t); /* m = number of low zero bits */
      if (m < maxm)
        {
          q = t >> m;
          /* The function expand_shift will choose between a shift and
             a sequence of additions, so the observed cost is given as
             MIN (m * add_cost(speed, mode), shift_cost(speed, mode, m)).  */
          op_cost = m * add_cost (speed, mode);
          if (shift_cost (speed, mode, m) < op_cost)
            op_cost = shift_cost (speed, mode, m);
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, q, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              best_cost = alg_in->cost;
              std::swap (alg_in, best_alg);
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_shift;
            }

          /* See if treating ORIG_T as a signed number yields a better
             sequence.  Try this sequence only for a negative ORIG_T
             as it would be useless for a non-negative ORIG_T.  */
          if ((HOST_WIDE_INT) orig_t < 0)
            {
              /* Shift ORIG_T as follows because a right shift of a
                 negative-valued signed type is implementation
                 defined.  */
              q = ~(~orig_t >> m);
              /* The function expand_shift will choose between a shift
                 and a sequence of additions, so the observed cost is
                 given as MIN (m * add_cost(speed, mode),
                 shift_cost(speed, mode, m)).  */
              op_cost = m * add_cost (speed, mode);
              if (shift_cost (speed, mode, m) < op_cost)
                op_cost = shift_cost (speed, mode, m);
              new_limit.cost = best_cost.cost - op_cost;
              new_limit.latency = best_cost.latency - op_cost;
              synth_mult (alg_in, q, &new_limit, mode);

              alg_in->cost.cost += op_cost;
              alg_in->cost.latency += op_cost;
              if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
                {
                  best_cost = alg_in->cost;
                  std::swap (alg_in, best_alg);
                  best_alg->log[best_alg->ops] = m;
                  best_alg->op[best_alg->ops] = alg_shift;
                }
            }
        }
      if (cache_hit)
        goto done;
    }
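
  /* Illustrative example (not in the original sources): T = 40 has
     m = 3 low zero bits, so the recursion synthesizes the multiply by
     q = 5 and appends one shift: x * 40 = ((x << 2) + x) << 3.  */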
  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

    do_alg_addsub_t_m2:
      for (w = 1; (w & t) != 0; w <<= 1)
        ;
      /* If T was -1, then W will be zero after the loop.  This is another
         case where T ends with ...111.  Handling this with (T + 1) and
         subtract 1 produces slightly better code and results in algorithm
         selection much faster than treating it like the ...0111 case
         below.  */
      if (w == 0
          || (w > 2
              /* Reject the case where t is 3.
                 Thus we prefer addition in that case.  */
              && t != 3))
        {
          /* T ends with ...111.  Multiply by (T + 1) and subtract T.  */

          op_cost = add_cost (speed, mode);
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, t + 1, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              best_cost = alg_in->cost;
              std::swap (alg_in, best_alg);
              best_alg->log[best_alg->ops] = 0;
              best_alg->op[best_alg->ops] = alg_sub_t_m2;
            }
        }
      else
        {
          /* T ends with ...01 or ...011.  Multiply by (T - 1) and add T.  */

          op_cost = add_cost (speed, mode);
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, t - 1, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              best_cost = alg_in->cost;
              std::swap (alg_in, best_alg);
              best_alg->log[best_alg->ops] = 0;
              best_alg->op[best_alg->ops] = alg_add_t_m2;
            }
        }

      /* We may be able to calculate a * -7, a * -15, a * -31, etc
         quickly with a - a * n for some appropriate constant n.  */
      m = exact_log2 (-orig_t + 1);
      if (m >= 0 && m < maxm)
        {
          op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
          /* If the target has a cheap shift-and-subtract insn use
             that in preference to a shift insn followed by a sub insn.
             Assume that the shift-and-sub is "atomic" with a latency
             equal to its cost, otherwise assume that on superscalar
             hardware the shift may be executed concurrently with the
             earlier steps in the algorithm.  */
          if (shiftsub1_cost (speed, mode, m) <= op_cost)
            {
              op_cost = shiftsub1_cost (speed, mode, m);
              op_latency = op_cost;
            }
          else
            op_latency = add_cost (speed, mode);

          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_latency;
          synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
                      &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_latency;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              best_cost = alg_in->cost;
              std::swap (alg_in, best_alg);
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_sub_t_m2;
            }
        }

      if (cache_hit)
        goto done;
    }
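
  /* Illustrative example (not in the original sources): T = 7 ends in
     ...111, so the preferred sequence multiplies by T + 1 = 8 with a
     single shift and then subtracts the operand:
     x * 7 = (x << 3) - x.  */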
  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shift the result by m and add/subtract it to itself.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */

 do_alg_addsub_factor:
  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      d = (HOST_WIDE_INT_1U << m) + 1;
      if (t % d == 0 && t > d && m < maxm
          && (!cache_hit || cache_alg == alg_add_factor))
        {
          op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
          if (shiftadd_cost (speed, mode, m) <= op_cost)
            op_cost = shiftadd_cost (speed, mode, m);

          op_latency = op_cost;

          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_latency;
          synth_mult (alg_in, t / d, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_latency;
          if (alg_in->cost.latency < op_cost)
            alg_in->cost.latency = op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              best_cost = alg_in->cost;
              std::swap (alg_in, best_alg);
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_add_factor;
            }
          /* Other factors will have been taken care of in the recursion.  */
          break;
        }

      d = (HOST_WIDE_INT_1U << m) - 1;
      if (t % d == 0 && t > d && m < maxm
          && (!cache_hit || cache_alg == alg_sub_factor))
        {
          op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
          if (shiftsub0_cost (speed, mode, m) <= op_cost)
            op_cost = shiftsub0_cost (speed, mode, m);

          op_latency = op_cost;

          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_latency;
          synth_mult (alg_in, t / d, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_latency;
          if (alg_in->cost.latency < op_cost)
            alg_in->cost.latency = op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              best_cost = alg_in->cost;
              std::swap (alg_in, best_alg);
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_sub_factor;
            }
          break;
        }
    }
  if (cache_hit)
    goto done;
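
  /* Illustrative example (not in the original sources): T = 45 factors
     as 9 * 5 with both factors of the form 2**m + 1, so the search can
     build x * 45 from two shift-and-add steps:
     y = (x << 2) + x, then (y << 3) + y.  */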
  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
    do_alg_add_t2_m:
      q = t - 1;
      m = ctz_hwi (q);
      if (q && m < maxm)
        {
          op_cost = shiftadd_cost (speed, mode, m);
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              best_cost = alg_in->cost;
              std::swap (alg_in, best_alg);
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_add_t2_m;
            }
        }
      if (cache_hit)
        goto done;

    do_alg_sub_t2_m:
      q = t + 1;
      m = ctz_hwi (q);
      if (q && m < maxm)
        {
          op_cost = shiftsub0_cost (speed, mode, m);
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              best_cost = alg_in->cost;
              std::swap (alg_in, best_alg);
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_sub_t2_m;
            }
        }
      if (cache_hit)
        goto done;
    }

 done:
  /* If best_cost has not decreased, we have not found any algorithm.  */
  if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
    {
      /* We failed to find an algorithm.  Record alg_impossible for
         this case (that is, <T, MODE, COST_LIMIT>) so that next time
         we are asked to find an algorithm for T within the same or
         lower COST_LIMIT, we can immediately return to the
         caller.  */
      entry_ptr->t = t;
      entry_ptr->mode = mode;
      entry_ptr->speed = speed;
      entry_ptr->alg = alg_impossible;
      entry_ptr->cost = *cost_limit;
      return;
    }

  /* Cache the result.  */
  if (!cache_hit)
    {
      entry_ptr->t = t;
      entry_ptr->mode = mode;
      entry_ptr->speed = speed;
      entry_ptr->alg = best_alg->op[best_alg->ops];
      entry_ptr->cost.cost = best_cost.cost;
      entry_ptr->cost.latency = best_cost.latency;
    }

  /* If we are getting a too long sequence for `struct algorithm'
     to record, make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = best_cost;
  memcpy (alg_out->op, best_alg->op,
          alg_out->ops * sizeof *alg_out->op);
  memcpy (alg_out->log, best_alg->log,
          alg_out->ops * sizeof *alg_out->log);
}
/* Find the cheapest way of multiplying a value of mode MODE by VAL.
   Try three variations:

       - a shift/add sequence based on VAL itself
       - a shift/add sequence based on -VAL, followed by a negation
       - a shift/add sequence based on VAL - 1, followed by an addition.

   Return true if the cheapest of these costs less than MULT_COST,
   describing the algorithm in *ALG and final fixup in *VARIANT.  */
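/* Illustrative example (not in the original sources): for VAL = -3 the
   negate_variant may win, synthesizing x * 3 = (x << 1) + x and then
   negating the result, while for VAL = 7 the VAL - 1 = 6 sequence plus
   a final addition (add_variant) competes with the direct
   (x << 3) - x form; the target's cost tables decide which candidate
   is returned.  */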
bool
choose_mult_variant (machine_mode mode, HOST_WIDE_INT val,
                     struct algorithm *alg, enum mult_variant *variant,
                     int mult_cost)
{
  struct algorithm alg2;
  struct mult_cost limit;
  int op_cost;
  bool speed = optimize_insn_for_speed_p ();

  /* Fail quickly for impossible bounds.  */
  if (mult_cost < 0)
    return false;

  /* Ensure that mult_cost provides a reasonable upper bound.
     Any constant multiplication can be performed with less
     than 2 * bits additions.  */
  op_cost = 2 * GET_MODE_UNIT_BITSIZE (mode) * add_cost (speed, mode);
  if (mult_cost > op_cost)
    mult_cost = op_cost;

  *variant = basic_variant;
  limit.cost = mult_cost;
  limit.latency = mult_cost;
  synth_mult (alg, val, &limit, mode);

  /* This works only if the inverted value actually fits in an
     `unsigned int' */
  if (HOST_BITS_PER_INT >= GET_MODE_UNIT_BITSIZE (mode))
    {
      op_cost = neg_cost (speed, mode);
      if (MULT_COST_LESS (&alg->cost, mult_cost))
        {
          limit.cost = alg->cost.cost - op_cost;
          limit.latency = alg->cost.latency - op_cost;
        }
      else
        {
          limit.cost = mult_cost - op_cost;
          limit.latency = mult_cost - op_cost;
        }

      synth_mult (&alg2, -val, &limit, mode);
      alg2.cost.cost += op_cost;
      alg2.cost.latency += op_cost;
      if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
        *alg = alg2, *variant = negate_variant;
    }

  /* This proves very useful for division-by-constant.  */
  op_cost = add_cost (speed, mode);
  if (MULT_COST_LESS (&alg->cost, mult_cost))
    {
      limit.cost = alg->cost.cost - op_cost;
      limit.latency = alg->cost.latency - op_cost;
    }
  else
    {
      limit.cost = mult_cost - op_cost;
      limit.latency = mult_cost - op_cost;
    }

  synth_mult (&alg2, val - 1, &limit, mode);
  alg2.cost.cost += op_cost;
  alg2.cost.latency += op_cost;
  if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
    *alg = alg2, *variant = add_variant;

  return MULT_COST_LESS (&alg->cost, mult_cost);
}
/* A subroutine of expand_mult, used for constant multiplications.
   Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
   convenient.  Use the shift/add sequence described by ALG and apply
   the final fixup specified by VARIANT.  */
static rtx
expand_mult_const (machine_mode mode, rtx op0, HOST_WIDE_INT val,
                   rtx target, const struct algorithm *alg,
                   enum mult_variant variant)
{
  unsigned HOST_WIDE_INT val_so_far;
  rtx_insn *insn;
  rtx accum, tem;
  int opno;
  machine_mode nmode;

  /* Avoid referencing memory over and over and invalid sharing
     on SUBREGs.  */
  op0 = force_reg (mode, op0);

  /* ACCUM starts out either as OP0 or as a zero, depending on
     the first operation.  */

  if (alg->op[0] == alg_zero)
    {
      accum = copy_to_mode_reg (mode, CONST0_RTX (mode));
      val_so_far = 0;
    }
  else if (alg->op[0] == alg_m)
    {
      accum = copy_to_mode_reg (mode, op0);
      val_so_far = 1;
    }
  else
    gcc_unreachable ();

  for (opno = 1; opno < alg->ops; opno++)
    {
      int log = alg->log[opno];
      rtx shift_subtarget = optimize ? 0 : accum;
      rtx add_target
        = (opno == alg->ops - 1 && target != 0 && variant != add_variant
           && !optimize)
          ? target : 0;
      rtx accum_target = optimize ? 0 : accum;
      rtx accum_inner;

      switch (alg->op[opno])
        {
        case alg_shift:
          tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
          /* REG_EQUAL note will be attached to the following insn.  */
          emit_move_insn (accum, tem);
          val_so_far <<= log;
          break;

        case alg_add_t_m2:
          tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
          accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
                                 add_target ? add_target : accum_target);
          val_so_far += HOST_WIDE_INT_1U << log;
          break;

        case alg_sub_t_m2:
          tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
          accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
                                 add_target ? add_target : accum_target);
          val_so_far -= HOST_WIDE_INT_1U << log;
          break;

        case alg_add_t2_m:
          accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                log, shift_subtarget, 0);
          accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
                                 add_target ? add_target : accum_target);
          val_so_far = (val_so_far << log) + 1;
          break;

        case alg_sub_t2_m:
          accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                log, shift_subtarget, 0);
          accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
                                 add_target ? add_target : accum_target);
          val_so_far = (val_so_far << log) - 1;
          break;

        case alg_add_factor:
          tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
          accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
                                 add_target ? add_target : accum_target);
          val_so_far += val_so_far << log;
          break;

        case alg_sub_factor:
          tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
          accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
                                 (add_target
                                  ? add_target : (optimize ? 0 : tem)));
          val_so_far = (val_so_far << log) - val_so_far;
          break;

        default:
          gcc_unreachable ();
        }

      if (SCALAR_INT_MODE_P (mode))
        {
          /* Write a REG_EQUAL note on the last insn so that we can cse
             multiplication sequences.  Note that if ACCUM is a SUBREG,
             we've set the inner register and must properly indicate that.  */
          tem = op0, nmode = mode;
          accum_inner = accum;
          if (GET_CODE (accum) == SUBREG)
            {
              accum_inner = SUBREG_REG (accum);
              nmode = GET_MODE (accum_inner);
              tem = gen_lowpart (nmode, op0);
            }

          insn = get_last_insn ();
          set_dst_reg_note (insn, REG_EQUAL,
                            gen_rtx_MULT (nmode, tem,
                                          gen_int_mode (val_so_far, nmode)),
                            accum_inner);
        }
    }

  if (variant == negate_variant)
    {
      val_so_far = -val_so_far;
      accum = expand_unop (mode, neg_optab, accum, target, 0);
    }
  else if (variant == add_variant)
    {
      val_so_far = val_so_far + 1;
      accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
    }

  /* Compare only the bits of val and val_so_far that are significant
     in the result mode, to avoid sign-/zero-extension confusion.  */
  nmode = GET_MODE_INNER (mode);
  val &= GET_MODE_MASK (nmode);
  val_so_far &= GET_MODE_MASK (nmode);
  gcc_assert (val == (HOST_WIDE_INT) val_so_far);

  return accum;
}
/* Perform a multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).

   We check specially for a constant integer as OP1.
   If you want this check for OP0 as well, then before calling
   you should swap the two operands if OP0 would be constant.  */

rtx
expand_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
             int unsignedp, bool no_libcall)
{
  enum mult_variant variant;
  struct algorithm algorithm;
  rtx scalar_op1;
  int max_cost;
  bool speed = optimize_insn_for_speed_p ();
  bool do_trapv = flag_trapv && SCALAR_INT_MODE_P (mode) && !unsignedp;

  if (CONSTANT_P (op0))
    std::swap (op0, op1);

  /* For vectors, there are several simplifications that can be made if
     all elements of the vector constant are identical.  */
  scalar_op1 = unwrap_const_vec_duplicate (op1);

  if (INTEGRAL_MODE_P (mode))
    {
      rtx fake_reg;
      HOST_WIDE_INT coeff;
      bool is_neg;
      int mode_bitsize;

      if (op1 == CONST0_RTX (mode))
        return op1;
      if (op1 == CONST1_RTX (mode))
        return op0;
      if (op1 == CONSTM1_RTX (mode))
        return expand_unop (mode, do_trapv ? negv_optab : neg_optab,
                            op0, target, 0);

      if (do_trapv)
        goto skip_synth;

      /* If mode is integer vector mode, check if the backend supports
         vector lshift (by scalar or vector) at all.  If not, we can't use
         synthetized multiply.  */
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
          && optab_handler (vashl_optab, mode) == CODE_FOR_nothing
          && optab_handler (ashl_optab, mode) == CODE_FOR_nothing)
        goto skip_synth;

      /* These are the operations that are potentially turned into
         a sequence of shifts and additions.  */
      mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);

      /* synth_mult does an `unsigned int' multiply.  As long as the mode is
         less than or equal in size to `unsigned int' this doesn't matter.
         If the mode is larger than `unsigned int', then synth_mult works
         only if the constant value exactly fits in an `unsigned int' without
         any truncation.  This means that multiplying by negative values does
         not work; results are off by 2^32 on a 32 bit machine.  */
      if (CONST_INT_P (scalar_op1))
        {
          coeff = INTVAL (scalar_op1);
          is_neg = coeff < 0;
        }
#if TARGET_SUPPORTS_WIDE_INT
      else if (CONST_WIDE_INT_P (scalar_op1))
#else
      else if (CONST_DOUBLE_AS_INT_P (scalar_op1))
#endif
        {
          int shift = wi::exact_log2 (rtx_mode_t (scalar_op1, mode));
          /* Perfect power of 2 (other than 1, which is handled above).  */
          if (shift > 0)
            return expand_shift (LSHIFT_EXPR, mode, op0,
                                 shift, target, unsignedp);
          else
            goto skip_synth;
        }
      else
        goto skip_synth;

      /* We used to test optimize here, on the grounds that it's better to
         produce a smaller program when -O is not used.  But this causes
         such a terrible slowdown sometimes that it seems better to always
         use synth_mult.  */

      /* Special case powers of two.  */
      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff)
          && !(is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT))
        return expand_shift (LSHIFT_EXPR, mode, op0,
                             floor_log2 (coeff), target, unsignedp);

      fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);

      /* Attempt to handle multiplication of DImode values by negative
         coefficients, by performing the multiplication by a positive
         multiplier and then inverting the result.  */
      if (is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT)
        {
          /* Its safe to use -coeff even for INT_MIN, as the
             result is interpreted as an unsigned coefficient.
             Exclude cost of op0 from max_cost to match the cost
             calculation of the synth_mult.  */
          coeff = -(unsigned HOST_WIDE_INT) coeff;
          max_cost = (set_src_cost (gen_rtx_MULT (mode, fake_reg, op1),
                                    mode, speed)
                      - neg_cost (speed, mode));
          if (max_cost <= 0)
            goto skip_synth;

          /* Special case powers of two.  */
          if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
            {
              rtx temp = expand_shift (LSHIFT_EXPR, mode, op0,
                                       floor_log2 (coeff), target, unsignedp);
              return expand_unop (mode, neg_optab, temp, target, 0);
            }

          if (choose_mult_variant (mode, coeff, &algorithm, &variant,
                                   max_cost))
            {
              rtx temp = expand_mult_const (mode, op0, coeff, NULL_RTX,
                                            &algorithm, variant);
              return expand_unop (mode, neg_optab, temp, target, 0);
            }
          goto skip_synth;
        }

      /* Exclude cost of op0 from max_cost to match the cost
         calculation of the synth_mult.  */
      max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), mode, speed);
      if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
        return expand_mult_const (mode, op0, coeff, target,
                                  &algorithm, variant);
    }
 skip_synth:

  /* Expand x*2.0 as x+x.  */
  if (CONST_DOUBLE_AS_FLOAT_P (scalar_op1)
      && real_equal (CONST_DOUBLE_REAL_VALUE (scalar_op1), &dconst2))
    {
      op0 = force_reg (GET_MODE (op0), op0);
      return expand_binop (mode, add_optab, op0, op0,
                           target, unsignedp,
                           no_libcall ? OPTAB_WIDEN : OPTAB_LIB_WIDEN);
    }

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab,
                      op0, op1, target, unsignedp,
                      no_libcall ? OPTAB_WIDEN : OPTAB_LIB_WIDEN);
  gcc_assert (op0 || no_libcall);
  return op0;
}
/* Return a cost estimate for multiplying a register by the given
   COEFFicient in the given MODE and SPEED.  */

int
mult_by_coeff_cost (HOST_WIDE_INT coeff, machine_mode mode, bool speed)
{
  int max_cost;
  struct algorithm algorithm;
  enum mult_variant variant;

  rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
  max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, fake_reg),
                           mode, speed);
  if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
    return algorithm.cost.cost;
  else
    return max_cost;
}
/* Perform a widening multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).
   THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
   or smul_widen_optab.

   We check specially for a constant integer as OP1, comparing the
   cost of a widening multiply against the cost of a sequence of shifts
   and adds.  */

rtx
expand_widening_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
                      int unsignedp, optab this_optab)
{
  bool speed = optimize_insn_for_speed_p ();
  rtx cop1;

  if (CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
                                this_optab == umul_widen_optab))
      && CONST_INT_P (cop1)
      && (INTVAL (cop1) >= 0
          || HWI_COMPUTABLE_MODE_P (mode)))
    {
      HOST_WIDE_INT coeff = INTVAL (cop1);
      int max_cost;
      enum mult_variant variant;
      struct algorithm algorithm;

      if (coeff == 0)
        return CONST0_RTX (mode);

      /* Special case powers of two.  */
      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
        {
          op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
          return expand_shift (LSHIFT_EXPR, mode, op0,
                               floor_log2 (coeff), target, unsignedp);
        }

      /* Exclude cost of op0 from max_cost to match the cost
         calculation of the synth_mult.  */
      max_cost = mul_widen_cost (speed, mode);
      if (choose_mult_variant (mode, coeff, &algorithm, &variant,
                               max_cost))
        {
          op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
          return expand_mult_const (mode, op0, coeff, target,
                                    &algorithm, variant);
        }
    }
  return expand_binop (mode, this_optab, op0, op1, target,
                       unsignedp, OPTAB_LIB_WIDEN);
}
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose
   multiplier more freely.

   The rounded-up logarithm of D is placed in *lgup_ptr.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */
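/* Illustrative example (not in the original sources): for D = 5 and
   N = PRECISION = 32 this yields the familiar reciprocal 0xCCCCCCCD
   with *POST_SHIFT_PTR = 2, so an unsigned 32-bit x / 5 becomes the
   upper half of the 64-bit product x * 0xCCCCCCCD, shifted right
   by 2.  */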
unsigned HOST_WIDE_INT
choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
                   unsigned HOST_WIDE_INT *multiplier_ptr,
                   int *post_shift_ptr, int *lgup_ptr)
{
  int lgup, post_shift;
  int pow, pow2;

  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  gcc_assert (lgup <= n);

  pow = n + lgup;
  pow2 = n + lgup - precision;

  /* mlow = 2^(N + lgup)/d */
  wide_int val = wi::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT);
  wide_int mlow = wi::udiv_trunc (val, d);

  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
  val |= wi::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT);
  wide_int mhigh = wi::udiv_trunc (val, d);

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms.  */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      unsigned HOST_WIDE_INT ml_lo = wi::extract_uhwi (mlow, 1,
                                                       HOST_BITS_PER_WIDE_INT);
      unsigned HOST_WIDE_INT mh_lo = wi::extract_uhwi (mhigh, 1,
                                                       HOST_BITS_PER_WIDE_INT);
      if (ml_lo >= mh_lo)
        break;

      mlow = wi::uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT);
      mhigh = wi::uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT);
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << n) - 1;
      *multiplier_ptr = mhigh.to_uhwi () & mask;
      return mhigh.to_uhwi () >= mask;
    }
  else
    {
      *multiplier_ptr = mhigh.to_uhwi ();
      return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1);
    }
}
/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */

static unsigned HOST_WIDE_INT
invert_mod2n (unsigned HOST_WIDE_INT x, int n)
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;
  int nbit = 3;

  mask = (n == HOST_BITS_PER_WIDE_INT
          ? HOST_WIDE_INT_M1U
          : (HOST_WIDE_INT_1U << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;		/* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
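
/* Illustrative example (not in the original sources): for x = 7 and
   n = 8 the routine starts with y = 7, which is already an inverse
   mod 2^3 (7*7 = 49 == 1 mod 8); one Newton-style iteration gives
   y = (7 * (2 - 7*7)) & 0xff = 183, and indeed
   7 * 183 = 1281 == 1 (mod 256).  */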
/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
   flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.

   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */
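/* Illustrative note (not in the original sources): for B-bit operands
   the two high halves are related, modulo 2^B, by

     uhigh = shigh + (op0 < 0 ? op1 : 0) + (op1 < 0 ? op0 : 0)

   and each conditional term is computed branch-free below as an
   arithmetic right shift by B - 1 (yielding 0 or -1) ANDed with the
   other operand.  */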
rtx
expand_mult_highpart_adjust (scalar_int_mode mode, rtx adj_operand, rtx op0,
                             rtx op1, rtx target, int unsignedp)
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
                      GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
  tem = expand_and (mode, tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
                     adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
                      GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
  tem = expand_and (mode, tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
                          target);

  return target;
}
/* Subroutine of expmed_mult_highpart.  Return the MODE high part of OP.  */

static rtx
extract_high_half (scalar_int_mode mode, rtx op)
{
  if (mode == word_mode)
    return gen_highpart (mode, op);

  scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();

  op = expand_shift (RSHIFT_EXPR, wider_mode, op,
                     GET_MODE_BITSIZE (mode), 0, 1);
  return convert_modes (mode, wider_mode, op, 0);
}
/* Like expmed_mult_highpart, but only consider using a multiplication
   optab.  OP1 is an rtx for the constant operand.  */

static rtx
expmed_mult_highpart_optab (scalar_int_mode mode, rtx op0, rtx op1,
                            rtx target, int unsignedp, int max_cost)
{
  rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
  optab moptab;
  rtx tem;
  int size;
  bool speed = optimize_insn_for_speed_p ();

  scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();

  size = GET_MODE_BITSIZE (mode);

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost (speed, mode) < max_cost)
    {
      moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
                          unsignedp, OPTAB_DIRECT);
      if (tem)
        return tem;
    }

  /* Secondly, same as above, but use sign flavor opposite of unsignedp.
     Need to adjust the result after the multiplication.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost (speed, mode)
          + 2 * shift_cost (speed, mode, size-1)
          + 4 * add_cost (speed, mode) < max_cost))
    {
      moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
                          unsignedp, OPTAB_DIRECT);
      if (tem)
        /* We used the wrong signedness.  Adjust the result.  */
        return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
                                            tem, unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (convert_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
      && mul_widen_cost (speed, wider_mode) < max_cost)
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
                          unsignedp, OPTAB_WIDEN);
      if (tem)
        return extract_high_half (mode, tem);
    }

  /* Try widening the mode and perform a non-widening multiplication.  */
  if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_cost (speed, wider_mode) + shift_cost (speed, mode, size-1)
          < max_cost))
    {
      rtx_insn *insns;
      rtx wop0, wop1;

      /* We need to widen the operands, for example to ensure the
         constant multiplier is correctly sign or zero extended.
         Use a sequence to clean-up any instructions emitted by
         the conversions if things don't work out.  */
      start_sequence ();
      wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
      wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
      tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
                          unsignedp, OPTAB_WIDEN);
      insns = get_insns ();
      end_sequence ();

      if (tem)
        {
          emit_insn (insns);
          return extract_high_half (mode, tem);
        }
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (convert_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost (speed, wider_mode)
          + 2 * shift_cost (speed, mode, size-1)
          + 4 * add_cost (speed, mode) < max_cost))
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
                          NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
        {
          tem = extract_high_half (mode, tem);
          /* We used the wrong signedness.  Adjust the result.  */
          return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
                                              target, unsignedp);
        }
    }

  return 0;
}
/* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
   putting the high half of the result in TARGET if that is convenient,
   and return where the result is.  If the operation can not be performed,
   0 is returned.

   MODE is the mode of operation and result.

   UNSIGNEDP nonzero means unsigned multiply.

   MAX_COST is the total allowed cost for the expanded RTL.  */
static rtx
expmed_mult_highpart (scalar_int_mode mode, rtx op0, rtx op1,
                      rtx target, int unsignedp, int max_cost)
{
  unsigned HOST_WIDE_INT cnst1;
  int extra_cost;
  bool sign_adjust = false;
  enum mult_variant variant;
  struct algorithm alg;
  rtx tem;
  bool speed = optimize_insn_for_speed_p ();

  /* We can't support modes wider than HOST_BITS_PER_INT.  */
  gcc_assert (HWI_COMPUTABLE_MODE_P (mode));

  cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);

  /* We can't optimize modes wider than BITS_PER_WORD.
     ??? We might be able to perform double-word arithmetic if
     mode == word_mode, however all the cost calculations in
     synth_mult etc. assume single-word operations.  */
  scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
  if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
    return expmed_mult_highpart_optab (mode, op0, op1, target,
                                       unsignedp, max_cost);

  extra_cost = shift_cost (speed, mode, GET_MODE_BITSIZE (mode) - 1);

  /* Check whether we try to multiply by a negative constant.  */
  if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
    {
      sign_adjust = true;
      extra_cost += add_cost (speed, mode);
    }

  /* See whether shift/add multiplication is cheap enough.  */
  if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
                           max_cost - extra_cost))
    {
      /* See whether the specialized multiplication optabs are
         cheaper than the shift/add version.  */
      tem = expmed_mult_highpart_optab (mode, op0, op1, target, unsignedp,
                                        alg.cost.cost + extra_cost);
      if (tem)
        return tem;

      tem = convert_to_mode (wider_mode, op0, unsignedp);
      tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
      tem = extract_high_half (mode, tem);

      /* Adjust result for signedness.  */
      if (sign_adjust)
        tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);

      return tem;
    }
  return expmed_mult_highpart_optab (mode, op0, op1, target,
                                     unsignedp, max_cost);
}
/* Expand signed modulus of OP0 by a power of two D in mode MODE.  */
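
/* Illustrative note (added commentary, not from the original source):
   with s = (x < 0 ? d - 1 : 0), truncating remainder by d = 2^logd
   satisfies  x rem d == ((x + s) & (d - 1)) - s.  E.g. for x = -5,
   d = 4: s = 3, (-5 + 3) & 3 == 2, and 2 - 3 == -1 == -5 rem 4.  The
   XOR variant below computes the same value as
   ((((x ^ m) - m) & (d - 1)) ^ m) - m, with m the all-ones sign mask
   of x.  */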
static rtx
expand_smod_pow2 (scalar_int_mode mode, rtx op0, HOST_WIDE_INT d)
{
  int logd;
  rtx result, temp, shift;
  rtx_code_label *label;
  int prec = GET_MODE_PRECISION (mode);

  logd = floor_log2 (d);
  result = gen_reg_rtx (mode);

  /* Avoid conditional branches when they're expensive.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
      && optimize_insn_for_speed_p ())
    {
      rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
                                      mode, 0, -1);
      if (signmask)
        {
          HOST_WIDE_INT masklow = (HOST_WIDE_INT_1 << logd) - 1;
          signmask = force_reg (mode, signmask);
          shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);

          /* Use the rtx_cost of a LSHIFTRT instruction to determine
             which instruction sequence to use.  If logical right shifts
             are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
             use a LSHIFTRT, 1 ADD, 1 SUB and an AND.  */

          temp = gen_rtx_LSHIFTRT (mode, result, shift);
          if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
              || (set_src_cost (temp, mode, optimize_insn_for_speed_p ())
                  > COSTS_N_INSNS (2)))
            {
              temp = expand_binop (mode, xor_optab, op0, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, sub_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, and_optab, temp,
                                   gen_int_mode (masklow, mode),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, xor_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, sub_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
            }
          else
            {
              signmask = expand_binop (mode, lshr_optab, signmask, shift,
                                       NULL_RTX, 1, OPTAB_LIB_WIDEN);
              signmask = force_reg (mode, signmask);

              temp = expand_binop (mode, add_optab, op0, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, and_optab, temp,
                                   gen_int_mode (masklow, mode),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, sub_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
            }
          return temp;
        }
    }

  /* Mask contains the mode's signbit and the significant bits of the
     modulus.  By including the signbit in the operation, many targets
     can avoid an explicit compare operation in the following comparison
     against zero.  */
  wide_int mask = wi::mask (logd, false, prec);
  mask = wi::set_bit (mask, prec - 1);

  temp = expand_binop (mode, and_optab, op0,
                       immed_wide_int_const (mask, mode),
                       result, 1, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  label = gen_label_rtx ();
  do_cmp_and_jump (result, const0_rtx, GE, mode, label);

  temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
                       0, OPTAB_LIB_WIDEN);
  mask = wi::mask (logd, true, prec);
  temp = expand_binop (mode, ior_optab, temp,
                       immed_wide_int_const (mask, mode),
                       result, 1, OPTAB_LIB_WIDEN);
  temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
                       0, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  emit_label (label);
  return result;
}
/* Expand signed division of OP0 by a power of two D in mode MODE.
   This routine is only called for positive values of D.  */
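
/* Illustrative note (added commentary, not from the original source):
   for d = 2^logd > 0, truncating signed division satisfies
     x / d == (x < 0 ? x + d - 1 : x) >> logd,
   and each branch-free variant below just materializes the conditional
   addend d - 1 in a different way (an scc value, a conditional move,
   or a sign mask that is masked or shifted down).  */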
static rtx
expand_sdiv_pow2 (scalar_int_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx temp;
  rtx_code_label *label;
  int logd;

  logd = floor_log2 (d);

  if (d == 2
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 1)
    {
      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
    }

  if (HAVE_conditional_move
      && BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2)
    {
      rtx temp2;

      start_sequence ();
      temp2 = copy_to_mode_reg (mode, op0);
      temp = expand_binop (mode, add_optab, temp2, gen_int_mode (d - 1, mode),
                           NULL_RTX, 0, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);

      /* Construct "temp2 = (temp2 < 0) ? temp : temp2".  */
      temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
                                     mode, temp, temp2, mode, 0);
      if (temp2)
        {
          rtx_insn *seq = get_insns ();
          end_sequence ();
          emit_insn (seq);
          return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
        }
      end_sequence ();
    }

  if (BRANCH_COST (optimize_insn_for_speed_p (),
                   false) >= 2)
    {
      int ushift = GET_MODE_BITSIZE (mode) - logd;

      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
      if (GET_MODE_BITSIZE (mode) >= BITS_PER_WORD
          || shift_cost (optimize_insn_for_speed_p (), mode, ushift)
             > COSTS_N_INSNS (1))
        temp = expand_binop (mode, and_optab, temp, gen_int_mode (d - 1, mode),
                             NULL_RTX, 0, OPTAB_LIB_WIDEN);
      else
        temp = expand_shift (RSHIFT_EXPR, mode, temp,
                             ushift, NULL_RTX, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
    }

  label = gen_label_rtx ();
  temp = copy_to_mode_reg (mode, op0);
  do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
  expand_inc (temp, gen_int_mode (d - 1, mode));
  emit_label (label);
  return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
}
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   left for later.  */

/* ??? For modulo, we don't actually need the highpart of the first product,
   the low part will do nicely.  And for small divisors, the second multiply
   can also be a low-part only multiply or even be completely left out.
   E.g. to calculate the remainder of a division by 3 with a 32 bit
   multiply, multiply with 0x55555556 and extract the upper two bits;
   the result is exact for inputs up to 0x1fffffff.
   The input range can be reduced by using cross-sum rules.
   For odd divisors >= 3, the following table gives right shift counts
   so that if a number is shifted by an integer multiple of the given
   amount, the remainder stays the same:
   2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
   14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
   0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
   20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
   0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12.

   Cross-sum rules for even numbers can be derived by leaving as many bits
   to the right alone as the divisor has zeros to the right.
   E.g. if x is an unsigned 32 bit number:
   (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
   */
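
/* Illustrative note (added commentary, not from the original source):
   as a concrete instance of the remainder-by-multiplication trick for
   divisor 3 on 32-bit inputs: 0x55555556 == (2^32 + 2) / 3, and for
   x = 100 the low 32 bits of 100 * 0x55555556 are 0x55555598; shifting
   that value right by 30 yields 1, which is indeed 100 mod 3.  */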
rtx
expand_divmod (int rem_flag, enum tree_code code, machine_mode mode,
               rtx op0, rtx op1, rtx target, int unsignedp)
{
  machine_mode compute_mode;
  rtx tquotient;
  rtx quotient = 0, remainder = 0;
  rtx_insn *last;
  rtx_insn *insn;
  optab optab1, optab2;
  int op1_is_constant, op1_is_pow2 = 0;
  int max_cost, extra_cost;
  static HOST_WIDE_INT last_div_const = 0;
  bool speed = optimize_insn_for_speed_p ();

  op1_is_constant = CONST_INT_P (op1);
  if (op1_is_constant)
    {
      wide_int ext_op1 = rtx_mode_t (op1, mode);
      op1_is_pow2 = (wi::popcount (ext_op1) == 1
                     || (! unsignedp
                         && wi::popcount (wi::neg (ext_op1)) == 1));
    }

  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding mode.
     For some special operands this code emits all RTL for the desired
     operation, for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
     by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expmed_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */
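
  /* Illustrative note (added commentary, not from the original source):
     the fixed-point approximation mentioned above is the one computed by
     choose_multiplier: an N-bit unsigned division by d becomes roughly
       q = (x * m) >> (N + post_shift),
     where m approximates 2^(N+post_shift) / d closely enough that the
     result is exact for all N-bit x, possibly with an initial right
     shift for even divisors or an add-and-halve fixup when m needs
     N + 1 bits.  */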
  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;

  /* When dividing by -1, we could get an overflow.
     negv_optab can handle overflows.  */
  if (! unsignedp && op1 == constm1_rtx)
    {
      if (rem_flag)
        return const0_rtx;
      return expand_unop (mode, flag_trapv && GET_MODE_CLASS (mode) == MODE_INT
                          ? negv_optab : neg_optab, op0, target, 0);
    }

  if (target
      /* Don't use the function value register as a target
         since we have to read it as well as write it,
         and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
          /* Don't clobber an operand while doing a multi-step calculation.  */
          || ((rem_flag || op1_is_constant)
              && (reg_mentioned_p (target, op0)
                  || (MEM_P (op0) && MEM_P (target))))
          || reg_mentioned_p (target, op1)
          || (MEM_P (op1) && MEM_P (target))))
    target = 0;

  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions that modxx3 insns and that these insns don't normally
     fail.  If these assumptions are not correct, we may generate less
     efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expmed_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */

  optab1 = (op1_is_pow2
            ? (unsignedp ? lshr_optab : ashr_optab)
            : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = (op1_is_pow2 ? optab1
            : (unsignedp ? udivmod_optab : sdivmod_optab));

  FOR_EACH_MODE_FROM (compute_mode, mode)
    if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
        || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    FOR_EACH_MODE_FROM (compute_mode, mode)
      if (optab_libfunc (optab1, compute_mode)
          || optab_libfunc (optab2, compute_mode))
        break;

  /* If we still couldn't find a mode, use MODE, but expand_binop will
     probably die.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;

  if (target && GET_MODE (target) == compute_mode)
    tquotient = target;
  else
    tquotient = gen_reg_rtx (compute_mode);

#if 0
  /* It should be possible to restrict the precision to GET_MODE_BITSIZE
     (mode), and thereby get better code when OP1 is a constant.  Do that
     later.  It will require going over all usages of SIZE below.  */
  size = GET_MODE_BITSIZE (mode);
#endif

  /* Only deduct something for a REM if the last divide done was
     for a different constant.  Then set the constant of the last
     divide.  */
  max_cost = (unsignedp
              ? udiv_cost (speed, compute_mode)
              : sdiv_cost (speed, compute_mode));
  if (rem_flag && ! (last_div_const != 0 && op1_is_constant
                     && INTVAL (op1) == last_div_const))
    max_cost -= (mul_cost (speed, compute_mode)
                 + add_cost (speed, compute_mode));

  last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;

  /* Now convert to the best mode to use.  */
  if (compute_mode != mode)
    {
      op0 = convert_modes (compute_mode, mode, op0, unsignedp);
      op1 = convert_modes (compute_mode, mode, op1, unsignedp);

      /* convert_modes may have placed op1 into a register, so we
         must recompute the following.  */
      op1_is_constant = CONST_INT_P (op1);
      if (op1_is_constant)
        {
          wide_int ext_op1 = rtx_mode_t (op1, compute_mode);
          op1_is_pow2 = (wi::popcount (ext_op1) == 1
                         || (! unsignedp
                             && wi::popcount (wi::neg (ext_op1)) == 1));
        }
      else
        op1_is_pow2 = 0;
    }

  /* If one of the operands is a volatile MEM, copy it into a register.  */

  if (MEM_P (op0) && MEM_VOLATILE_P (op0))
    op0 = force_reg (compute_mode, op0);
  if (MEM_P (op1) && MEM_VOLATILE_P (op1))
    op1 = force_reg (compute_mode, op1);

  /* If we need the remainder or if OP1 is constant, we need to
     put OP0 in a register in case it has any queued subexpressions.  */
  if (rem_flag || op1_is_constant)
    op0 = force_reg (compute_mode, op0);

  last = get_last_insn ();

  /* Promote floor rounding to trunc rounding for unsigned operations.  */
  if (unsignedp)
    {
      if (code == FLOOR_DIV_EXPR)
        code = TRUNC_DIV_EXPR;
      if (code == FLOOR_MOD_EXPR)
        code = TRUNC_MOD_EXPR;
      if (code == EXACT_DIV_EXPR && op1_is_pow2)
        code = TRUNC_DIV_EXPR;
    }

  if (op1 != const0_rtx)
    switch (code)
      {
      case TRUNC_MOD_EXPR:
      case TRUNC_DIV_EXPR:
        if (op1_is_constant)
          {
            scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
            int size = GET_MODE_BITSIZE (int_mode);
            if (unsignedp)
              {
                unsigned HOST_WIDE_INT mh, ml;
                int pre_shift, post_shift;
                int dummy;
                wide_int wd = rtx_mode_t (op1, int_mode);
                unsigned HOST_WIDE_INT d = wd.to_uhwi ();

                if (wi::popcount (wd) == 1)
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        unsigned HOST_WIDE_INT mask
                          = (HOST_WIDE_INT_1U << pre_shift) - 1;
                        remainder
                          = expand_binop (int_mode, and_optab, op0,
                                          gen_int_mode (mask, int_mode),
                                          remainder, 1, OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift (RSHIFT_EXPR, int_mode, op0,
                                             pre_shift, tquotient, 1);
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    if (d >= (HOST_WIDE_INT_1U << (size - 1)))
                      {
                        /* Most significant bit of divisor is set; emit an scc
                           insn.  */
                        quotient = emit_store_flag_force (tquotient, GEU,
                                                          op0, op1,
                                                          int_mode, 1, 1);
                      }
                    else
                      {
                        /* Find a suitable multiplier and right shift count
                           instead of multiplying with D.  */

                        mh = choose_multiplier (d, size, size,
                                                &ml, &post_shift, &dummy);

                        /* If the suggested multiplier is more than SIZE bits,
                           we can do better for even divisors, using an
                           initial right shift.  */
                        if (mh != 0 && (d & 1) == 0)
                          {
                            pre_shift = ctz_or_zero (d);
                            mh = choose_multiplier (d >> pre_shift, size,
                                                    size - pre_shift,
                                                    &ml, &post_shift, &dummy);
                            gcc_assert (!mh);
                          }
                        else
                          pre_shift = 0;

                        if (mh != 0)
                          {
                            rtx t1, t2, t3, t4;

                            if (post_shift - 1 >= BITS_PER_WORD)
                              goto fail1;

                            extra_cost
                              = (shift_cost (speed, int_mode, post_shift - 1)
                                 + shift_cost (speed, int_mode, 1)
                                 + 2 * add_cost (speed, int_mode));
                            t1 = expmed_mult_highpart
                              (int_mode, op0, gen_int_mode (ml, int_mode),
                               NULL_RTX, 1, max_cost - extra_cost);
                            if (t1 == 0)
                              goto fail1;
                            t2 = force_operand (gen_rtx_MINUS (int_mode,
                                                               op0, t1),
                                                NULL_RTX);
                            t3 = expand_shift (RSHIFT_EXPR, int_mode,
                                               t2, 1, NULL_RTX, 1);
                            t4 = force_operand (gen_rtx_PLUS (int_mode,
                                                              t1, t3),
                                                NULL_RTX);
                            quotient = expand_shift
                              (RSHIFT_EXPR, int_mode, t4,
                               post_shift - 1, tquotient, 1);
                          }
                        else
                          {
                            rtx t1, t2;

                            if (pre_shift >= BITS_PER_WORD
                                || post_shift >= BITS_PER_WORD)
                              goto fail1;

                            t1 = expand_shift
                              (RSHIFT_EXPR, int_mode, op0,
                               pre_shift, NULL_RTX, 1);
                            extra_cost
                              = (shift_cost (speed, int_mode, pre_shift)
                                 + shift_cost (speed, int_mode, post_shift));
                            t2 = expmed_mult_highpart
                              (int_mode, t1,
                               gen_int_mode (ml, int_mode),
                               NULL_RTX, 1, max_cost - extra_cost);
                            if (t2 == 0)
                              goto fail1;
                            quotient = expand_shift
                              (RSHIFT_EXPR, int_mode, t2,
                               post_shift, tquotient, 1);
                          }
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last)
                  set_dst_reg_note (insn, REG_EQUAL,
                                    gen_rtx_UDIV (int_mode, op0, op1),
                                    quotient);
              }
            else                /* TRUNC_DIV, signed */
              {
                unsigned HOST_WIDE_INT ml;
                int lgup, post_shift;
                rtx mlr;
                HOST_WIDE_INT d = INTVAL (op1);
                unsigned HOST_WIDE_INT abs_d;

                /* Since d might be INT_MIN, we have to cast to
                   unsigned HOST_WIDE_INT before negating to avoid
                   undefined signed overflow.  */
                abs_d = (d >= 0
                         ? (unsigned HOST_WIDE_INT) d
                         : - (unsigned HOST_WIDE_INT) d);

                /* n rem d = n rem -d */
                if (rem_flag && d < 0)
                  {
                    d = abs_d;
                    op1 = gen_int_mode (abs_d, int_mode);
                  }

                if (d == 1)
                  quotient = op0;
                else if (d == -1)
                  quotient = expand_unop (int_mode, neg_optab, op0,
                                          tquotient, 0);
                else if (size <= HOST_BITS_PER_WIDE_INT
                         && abs_d == HOST_WIDE_INT_1U << (size - 1))
                  {
                    /* This case is not handled correctly below.  */
                    quotient = emit_store_flag (tquotient, EQ, op0, op1,
                                                int_mode, 1, 1);
                    if (quotient == 0)
                      goto fail1;
                  }
                else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
                         && (size <= HOST_BITS_PER_WIDE_INT || d >= 0)
                         && (rem_flag
                             ? smod_pow2_cheap (speed, int_mode)
                             : sdiv_pow2_cheap (speed, int_mode))
                         /* We assume that cheap metric is true if the
                            optab has an expander for this mode.  */
                         && ((optab_handler ((rem_flag ? smod_optab
                                              : sdiv_optab),
                                             int_mode)
                              != CODE_FOR_nothing)
                             || (optab_handler (sdivmod_optab, int_mode)
                                 != CODE_FOR_nothing)))
                  ;
                else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d)
                         && (size <= HOST_BITS_PER_WIDE_INT
                             || abs_d != (unsigned HOST_WIDE_INT) d))
                  {
                    if (rem_flag)
                      {
                        remainder = expand_smod_pow2 (int_mode, op0, d);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }

                    if (sdiv_pow2_cheap (speed, int_mode)
                        && ((optab_handler (sdiv_optab, int_mode)
                             != CODE_FOR_nothing)
                            || (optab_handler (sdivmod_optab, int_mode)
                                != CODE_FOR_nothing)))
                      quotient = expand_divmod (0, TRUNC_DIV_EXPR,
                                                int_mode, op0,
                                                gen_int_mode (abs_d,
                                                              int_mode),
                                                NULL_RTX, 0);
                    else
                      quotient = expand_sdiv_pow2 (int_mode, op0, abs_d);

                    /* We have computed OP0 / abs(OP1).  If OP1 is negative,
                       negate the quotient.  */
                    if (d < 0)
                      {
                        insn = get_last_insn ();
                        if (insn != last
                            && abs_d < (HOST_WIDE_INT_1U
                                        << (HOST_BITS_PER_WIDE_INT - 1)))
                          set_dst_reg_note (insn, REG_EQUAL,
                                            gen_rtx_DIV (int_mode, op0,
                                                         gen_int_mode
                                                           (abs_d,
                                                            int_mode)),
                                            quotient);

                        quotient = expand_unop (int_mode, neg_optab,
                                                quotient, quotient, 0);
                      }
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    choose_multiplier (abs_d, size, size - 1,
                                       &ml, &post_shift, &lgup);
                    if (ml < HOST_WIDE_INT_1U << (size - 1))
                      {
                        rtx t1, t2, t3;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        extra_cost = (shift_cost (speed, int_mode, post_shift)
                                      + shift_cost (speed, int_mode, size - 1)
                                      + add_cost (speed, int_mode));
                        t1 = expmed_mult_highpart
                          (int_mode, op0, gen_int_mode (ml, int_mode),
                           NULL_RTX, 0, max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = expand_shift
                          (RSHIFT_EXPR, int_mode, t1,
                           post_shift, NULL_RTX, 0);
                        t3 = expand_shift
                          (RSHIFT_EXPR, int_mode, op0,
                           size - 1, NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (int_mode, t3, t2),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (int_mode, t2, t3),
                                             tquotient);
                      }
                    else
                      {
                        rtx t1, t2, t3, t4;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        ml |= HOST_WIDE_INT_M1U << (size - 1);
                        mlr = gen_int_mode (ml, int_mode);
                        extra_cost = (shift_cost (speed, int_mode, post_shift)
                                      + shift_cost (speed, int_mode, size - 1)
                                      + 2 * add_cost (speed, int_mode));
                        t1 = expmed_mult_highpart (int_mode, op0, mlr,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = force_operand (gen_rtx_PLUS (int_mode, t1, op0),
                                            NULL_RTX);
                        t3 = expand_shift
                          (RSHIFT_EXPR, int_mode, t2,
                           post_shift, NULL_RTX, 0);
                        t4 = expand_shift
                          (RSHIFT_EXPR, int_mode, op0,
                           size - 1, NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (int_mode, t4, t3),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (int_mode, t3, t4),
                                             tquotient);
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last)
                  set_dst_reg_note (insn, REG_EQUAL,
                                    gen_rtx_DIV (int_mode, op0, op1),
                                    quotient);
              }
          }
        break;

      fail1:
        delete_insns_since (last);
        break;

      case FLOOR_DIV_EXPR:
      case FLOOR_MOD_EXPR:
        /* We will come here only for signed operations.  */
        if (op1_is_constant && HWI_COMPUTABLE_MODE_P (compute_mode))
          {
            scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
            int size = GET_MODE_BITSIZE (int_mode);
            unsigned HOST_WIDE_INT mh, ml;
            int pre_shift, lgup, post_shift;
            HOST_WIDE_INT d = INTVAL (op1);

            if (d > 0)
              {
                /* We could just as easily deal with negative constants here,
                   but it does not seem worth the trouble for GCC 2.6.  */
                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        unsigned HOST_WIDE_INT mask
                          = (HOST_WIDE_INT_1U << pre_shift) - 1;
                        remainder = expand_binop
                          (int_mode, and_optab, op0,
                           gen_int_mode (mask, int_mode),
                           remainder, 0, OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift
                      (RSHIFT_EXPR, int_mode, op0,
                       pre_shift, tquotient, 0);
                  }
                else
                  {
                    rtx t1, t2, t3, t4;

                    mh = choose_multiplier (d, size, size - 1,
                                            &ml, &post_shift, &lgup);
                    gcc_assert (!mh);

                    if (post_shift < BITS_PER_WORD
                        && size - 1 < BITS_PER_WORD)
                      {
                        t1 = expand_shift
                          (RSHIFT_EXPR, int_mode, op0,
                           size - 1, NULL_RTX, 0);
                        t2 = expand_binop (int_mode, xor_optab, op0, t1,
                                           NULL_RTX, 0, OPTAB_WIDEN);
                        extra_cost = (shift_cost (speed, int_mode, post_shift)
                                      + shift_cost (speed, int_mode, size - 1)
                                      + 2 * add_cost (speed, int_mode));
                        t3 = expmed_mult_highpart
                          (int_mode, t2, gen_int_mode (ml, int_mode),
                           NULL_RTX, 1, max_cost - extra_cost);
                        if (t3 != 0)
                          {
                            t4 = expand_shift
                              (RSHIFT_EXPR, int_mode, t3,
                               post_shift, NULL_RTX, 1);
                            quotient = expand_binop (int_mode, xor_optab,
                                                     t4, t1, tquotient, 0,
                                                     OPTAB_WIDEN);
                          }
                      }
                  }
              }
            else
              {
                rtx nsign, t1, t2, t3, t4;
                t1 = force_operand (gen_rtx_PLUS (int_mode,
                                                  op0, constm1_rtx), NULL_RTX);
                t2 = expand_binop (int_mode, ior_optab, op0, t1, NULL_RTX,
                                   0, OPTAB_WIDEN);
                nsign = expand_shift (RSHIFT_EXPR, int_mode, t2,
                                      size - 1, NULL_RTX, 0);
                t3 = force_operand (gen_rtx_MINUS (int_mode, t1, nsign),
                                    NULL_RTX);
                t4 = expand_divmod (0, TRUNC_DIV_EXPR, int_mode, t3, op1,
                                    NULL_RTX, 0);
                if (t4)
                  {
                    rtx t5;
                    t5 = expand_unop (int_mode, one_cmpl_optab, nsign,
                                      NULL_RTX, 0);
                    quotient = force_operand (gen_rtx_PLUS (int_mode, t4, t5),
                                              tquotient);
                  }
              }
          }

        if (quotient != 0)
          break;
        delete_insns_since (last);

        /* Try using an instruction that produces both the quotient and
           remainder, using truncation.  We can easily compensate the quotient
           or remainder to get floor rounding, once we have the remainder.
           Notice that we compute also the final remainder value here,
           and return the result right away.  */
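
        /* Illustrative note (added commentary, not from the original
           source): the compensation below follows from
             floor_div (x, y) == trunc_div (x, y) - 1
           exactly when the truncating remainder is nonzero and x and y
           have opposite signs (x ^ y < 0), in which case the floor
           remainder is trunc_rem (x, y) + y.  E.g. -7 / 2: truncation
           gives -3 rem -1; flooring gives -4 rem 1 == -1 + 2.  */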
        if (target == 0 || GET_MODE (target) != compute_mode)
          target = gen_reg_rtx (compute_mode);

        if (rem_flag)
          {
            remainder
              = REG_P (target) ? target : gen_reg_rtx (compute_mode);
            quotient = gen_reg_rtx (compute_mode);
          }
        else
          {
            quotient
              = REG_P (target) ? target : gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
          }

        if (expand_twoval_binop (sdivmod_optab, op0, op1,
                                 quotient, remainder, 0))
          {
            /* This could be computed with a branch-less sequence.
               Save that for later.  */
            rtx tem;
            rtx_code_label *label = gen_label_rtx ();
            do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
            tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
            expand_dec (quotient, const1_rtx);
            expand_inc (remainder, op1);
            emit_label (label);
            return gen_lowpart (mode, rem_flag ? remainder : quotient);
          }

        /* No luck with division elimination or divmod.  Have to do it
           by conditionally adjusting op0 *and* the result.  */
        {
          rtx_code_label *label1, *label2, *label3, *label4, *label5;
          rtx adjusted_op0;
          rtx tem;

          quotient = gen_reg_rtx (compute_mode);
          adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
          label1 = gen_label_rtx ();
          label2 = gen_label_rtx ();
          label3 = gen_label_rtx ();
          label4 = gen_label_rtx ();
          label5 = gen_label_rtx ();
          do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (targetm.gen_jump (label5));
          emit_barrier ();
          emit_label (label1);
          expand_inc (adjusted_op0, const1_rtx);
          emit_jump_insn (targetm.gen_jump (label4));
          emit_barrier ();
          emit_label (label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (targetm.gen_jump (label5));
          emit_barrier ();
          emit_label (label3);
          expand_dec (adjusted_op0, const1_rtx);
          emit_label (label4);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          expand_dec (quotient, const1_rtx);
          emit_label (label5);
        }
        break;

      case CEIL_DIV_EXPR:
      case CEIL_MOD_EXPR:
        if (unsignedp)
          {
            if (op1_is_constant
                && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                && (HWI_COMPUTABLE_MODE_P (compute_mode)
                    || INTVAL (op1) >= 0))
              {
                scalar_int_mode int_mode
                  = as_a <scalar_int_mode> (compute_mode);
                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, int_mode, op0,
                                   floor_log2 (d), tquotient, 1);
                t2 = expand_binop (int_mode, and_optab, op0,
                                   gen_int_mode (d - 1, int_mode),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (int_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx, int_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx_code_label *lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, int_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (int_mode, t1, t3),
                                            tquotient);
              }
            else
              {
                /* Try using an instruction that produces both the quotient and
                   remainder, using truncation.  We can easily compensate the
                   quotient or remainder to get ceiling rounding, once we have the
                   remainder.  Notice that we compute also the final remainder
                   value here, and return the result right away.  */
                if (target == 0 || GET_MODE (target) != compute_mode)
                  target = gen_reg_rtx (compute_mode);

                if (rem_flag)
                  {
                    remainder = (REG_P (target)
                                 ? target : gen_reg_rtx (compute_mode));
                    quotient = gen_reg_rtx (compute_mode);
                  }
                else
                  {
                    quotient = (REG_P (target)
                                ? target : gen_reg_rtx (compute_mode));
                    remainder = gen_reg_rtx (compute_mode);
                  }

                if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
                                         remainder, 1))
                  {
                    /* This could be computed with a branch-less sequence.
                       Save that for later.  */
                    rtx_code_label *label = gen_label_rtx ();
                    do_cmp_and_jump (remainder, const0_rtx, EQ,
                                     compute_mode, label);
                    expand_inc (quotient, const1_rtx);
                    expand_dec (remainder, op1);
                    emit_label (label);
                    return gen_lowpart (mode, rem_flag ? remainder : quotient);
                  }

                /* No luck with division elimination or divmod.  Have to do it
                   by conditionally adjusting op0 *and* the result.  */
                {
                  rtx_code_label *label1, *label2;
                  rtx adjusted_op0, tem;

                  quotient = gen_reg_rtx (compute_mode);
                  adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
                  label1 = gen_label_rtx ();
                  label2 = gen_label_rtx ();
                  do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
                                   compute_mode, label1);
                  emit_move_insn (quotient, const0_rtx);
                  emit_jump_insn (targetm.gen_jump (label2));
                  emit_barrier ();
                  emit_label (label1);
                  expand_dec (adjusted_op0, const1_rtx);
                  tem = expand_binop (compute_mode, udiv_optab, adjusted_op0,
                                      op1, quotient, 1, OPTAB_LIB_WIDEN);
                  if (tem != quotient)
                    emit_move_insn (quotient, tem);
                  expand_inc (quotient, const1_rtx);
                  emit_label (label2);
                }
              }
          }
        else /* signed */
          {
            if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                && INTVAL (op1) >= 0)
              {
                /* This is extremely similar to the code for the unsigned case
                   above.  For 2.7 we should merge these variants, but for
                   2.6.1 I don't want to touch the code for unsigned since that
                   get used in C.  The signed case will only be used by other
                   languages (Ada).  */

                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   floor_log2 (d), tquotient, 0);
                t2 = expand_binop (compute_mode, and_optab, op0,
                                   gen_int_mode (d - 1, compute_mode),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (compute_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                      compute_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx_code_label *lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, t3),
                                            tquotient);
              }
            else
              {
                /* Try using an instruction that produces both the quotient and
                   remainder, using truncation.  We can easily compensate the
                   quotient or remainder to get ceiling rounding, once we have the
                   remainder.  Notice that we compute also the final remainder
                   value here, and return the result right away.  */
                if (target == 0 || GET_MODE (target) != compute_mode)
                  target = gen_reg_rtx (compute_mode);

                if (rem_flag)
                  {
                    remainder = (REG_P (target)
                                 ? target : gen_reg_rtx (compute_mode));
                    quotient = gen_reg_rtx (compute_mode);
                  }
                else
                  {
                    quotient = (REG_P (target)
                                ? target : gen_reg_rtx (compute_mode));
                    remainder = gen_reg_rtx (compute_mode);
                  }

                if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
                                         remainder, 0))
                  {
                    /* This could be computed with a branch-less sequence.
                       Save that for later.  */
                    rtx tem;
                    rtx_code_label *label = gen_label_rtx ();
                    do_cmp_and_jump (remainder, const0_rtx, EQ,
                                     compute_mode, label);
                    tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                        NULL_RTX, 0, OPTAB_WIDEN);
                    do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
                    expand_inc (quotient, const1_rtx);
                    expand_dec (remainder, op1);
                    emit_label (label);
                    return gen_lowpart (mode, rem_flag ? remainder : quotient);
                  }

                /* No luck with division elimination or divmod.  Have to do it
                   by conditionally adjusting op0 *and* the result.  */
                {
                  rtx_code_label *label1, *label2, *label3, *label4, *label5;
                  rtx adjusted_op0;
                  rtx tem;

                  quotient = gen_reg_rtx (compute_mode);
                  adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
                  label1 = gen_label_rtx ();
                  label2 = gen_label_rtx ();
                  label3 = gen_label_rtx ();
                  label4 = gen_label_rtx ();
                  label5 = gen_label_rtx ();
                  do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
                  do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
                                   compute_mode, label1);
                  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0,
                                      op1, quotient, 0, OPTAB_LIB_WIDEN);
                  if (tem != quotient)
                    emit_move_insn (quotient, tem);
                  emit_jump_insn (targetm.gen_jump (label5));
                  emit_barrier ();
                  emit_label (label1);
                  expand_dec (adjusted_op0, const1_rtx);
                  emit_jump_insn (targetm.gen_jump (label4));
                  emit_barrier ();
                  emit_label (label2);
                  do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
                                   compute_mode, label3);
                  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0,
                                      op1, quotient, 0, OPTAB_LIB_WIDEN);
                  if (tem != quotient)
                    emit_move_insn (quotient, tem);
                  emit_jump_insn (targetm.gen_jump (label5));
                  emit_barrier ();
                  emit_label (label3);
                  expand_inc (adjusted_op0, const1_rtx);
                  emit_label (label4);
                  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0,
                                      op1, quotient, 0, OPTAB_LIB_WIDEN);
                  if (tem != quotient)
                    emit_move_insn (quotient, tem);
                  expand_inc (quotient, const1_rtx);
                  emit_label (label5);
                }
              }
          }
        break;
      case EXACT_DIV_EXPR:
        if (op1_is_constant && HWI_COMPUTABLE_MODE_P (compute_mode))
          {
            scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
            int size = GET_MODE_BITSIZE (int_mode);
            HOST_WIDE_INT d = INTVAL (op1);
            unsigned HOST_WIDE_INT ml;
            int pre_shift;
            rtx t1;

            pre_shift = ctz_or_zero (d);
            ml = invert_mod2n (d >> pre_shift, size);
            t1 = expand_shift (RSHIFT_EXPR, int_mode, op0,
                               pre_shift, NULL_RTX, unsignedp);
            quotient = expand_mult (int_mode, t1,
                                    gen_int_mode (ml, int_mode),
                                    NULL_RTX, 1);

            insn = get_last_insn ();
            set_dst_reg_note (insn, REG_EQUAL,
                              gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
                                              int_mode, op0, op1),
                              quotient);
          }
        break;
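
      /* Illustrative note (added commentary, not from the original
         source): when OP0 is known to be a multiple of D, dividing by
         the odd part of D reduces to multiplying by its inverse mod 2^N
         from invert_mod2n.  E.g. for N = 32 and d = 3 the inverse is
         0xaaaaaaab, and 15 * 0xaaaaaaab == 5 (mod 2^32).  */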
      case ROUND_DIV_EXPR:
      case ROUND_MOD_EXPR:
        if (unsignedp)
          {
            scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
            rtx tem;
            rtx_code_label *label;
            label = gen_label_rtx ();
            quotient = gen_reg_rtx (int_mode);
            remainder = gen_reg_rtx (int_mode);
            if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
                                     remainder, 1) == 0)
              {
                rtx tem;
                quotient = expand_binop (int_mode, udiv_optab, op0, op1,
                                         quotient, 1, OPTAB_LIB_WIDEN);
                tem = expand_mult (int_mode, quotient, op1, NULL_RTX, 1);
                remainder = expand_binop (int_mode, sub_optab, op0, tem,
                                          remainder, 1, OPTAB_LIB_WIDEN);
              }
            tem = plus_constant (int_mode, op1, -1);
            tem = expand_shift (RSHIFT_EXPR, int_mode, tem, 1, NULL_RTX, 1);
            do_cmp_and_jump (remainder, tem, LEU, int_mode, label);
            expand_inc (quotient, const1_rtx);
            expand_dec (remainder, op1);
            emit_label (label);
          }
        else
          {
            scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
            int size = GET_MODE_BITSIZE (int_mode);
            rtx abs_rem, abs_op1, tem, mask;
            rtx_code_label *label;
            label = gen_label_rtx ();
            quotient = gen_reg_rtx (int_mode);
            remainder = gen_reg_rtx (int_mode);
            if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
                                     remainder, 0) == 0)
              {
                rtx tem;
                quotient = expand_binop (int_mode, sdiv_optab, op0, op1,
                                         quotient, 0, OPTAB_LIB_WIDEN);
                tem = expand_mult (int_mode, quotient, op1, NULL_RTX, 0);
                remainder = expand_binop (int_mode, sub_optab, op0, tem,
                                          remainder, 0, OPTAB_LIB_WIDEN);
              }
            abs_rem = expand_abs (int_mode, remainder, NULL_RTX, 1, 0);
            abs_op1 = expand_abs (int_mode, op1, NULL_RTX, 1, 0);
            tem = expand_shift (LSHIFT_EXPR, int_mode, abs_rem,
                                1, NULL_RTX, 1);
            do_cmp_and_jump (tem, abs_op1, LTU, int_mode, label);
            tem = expand_binop (int_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            mask = expand_shift (RSHIFT_EXPR, int_mode, tem,
                                 size - 1, NULL_RTX, 0);
            tem = expand_binop (int_mode, xor_optab, mask, const1_rtx,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (int_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_inc (quotient, tem);
            tem = expand_binop (int_mode, xor_optab, mask, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (int_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_dec (remainder, tem);
            emit_label (label);
          }
        return gen_lowpart (mode, rem_flag ? remainder : quotient);
      default:
        gcc_unreachable ();
      }

  if (quotient == 0)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (rem_flag)
        {
          /* Try to produce the remainder without producing the quotient.
             If we seem to have a divmod pattern that does not require widening,
             don't try widening here.  We should really have a WIDEN argument
             to expand_twoval_binop, since what we'd really like to do here is
             1) try a mod insn in compute_mode
             2) try a divmod insn in compute_mode
             3) try a div insn in compute_mode and multiply-subtract to get
                remainder
             4) try the same things with widening allowed.  */
          remainder
            = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                 op0, op1, target,
                                 unsignedp,
                                 ((optab_handler (optab2, compute_mode)
                                   != CODE_FOR_nothing)
                                  ? OPTAB_DIRECT : OPTAB_WIDEN));
          if (remainder == 0)
            {
              /* No luck there.  Can we do remainder and divide at once
                 without a library call?  */
              remainder = gen_reg_rtx (compute_mode);
              if (! expand_twoval_binop ((unsignedp
                                          ? udivmod_optab
                                          : sdivmod_optab),
                                         op0, op1,
                                         NULL_RTX, remainder, unsignedp))
                remainder = 0;
            }

          if (remainder)
            return gen_lowpart (mode, remainder);
        }

      /* Produce the quotient.  Try a quotient insn, but not a library call.
         If we have a divmod in this mode, use it in preference to widening
         the div (for this test we assume it will not fail).  Note that optab2
         is set to the one of the two optabs that the call below will use.  */
      quotient
        = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
                             op0, op1, rem_flag ? NULL_RTX : target,
                             unsignedp,
                             ((optab_handler (optab2, compute_mode)
                               != CODE_FOR_nothing)
                              ? OPTAB_DIRECT : OPTAB_WIDEN));

      if (quotient == 0)
        {
          /* No luck there.  Try a quotient-and-remainder insn,
             keeping the quotient alone.  */
          quotient = gen_reg_rtx (compute_mode);
          if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
                                     op0, op1,
                                     quotient, NULL_RTX, unsignedp))
            {
              quotient = 0;
              /* Still no luck.  If we are not computing the remainder,
                 use a library call for the quotient.  */
              quotient = sign_expand_binop (compute_mode,
                                            udiv_optab, sdiv_optab,
                                            op0, op1, target,
                                            unsignedp, OPTAB_LIB_WIDEN);
            }
        }
    }

  if (rem_flag)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (quotient == 0)
        {
          /* No divide instruction either.  Use library for remainder.  */
          remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                         op0, op1, target,
                                         unsignedp, OPTAB_LIB_WIDEN);
          /* No remainder function.  Try a quotient-and-remainder
             function, keeping the remainder.  */
          if (remainder == 0)
            {
              remainder = gen_reg_rtx (compute_mode);
              if (!expand_twoval_binop_libfunc
                  (unsignedp ? udivmod_optab : sdivmod_optab,
                   op0, op1,
                   NULL_RTX, remainder,
                   unsignedp ? UMOD : MOD))
                remainder = NULL_RTX;
            }
        }
      else
        {
          /* We divided.  Now finish doing X - Y * (X / Y).  */
          remainder = expand_mult (compute_mode, quotient, op1,
                                   NULL_RTX, unsignedp);
          remainder = expand_binop (compute_mode, sub_optab, op0,
                                    remainder, target, unsignedp,
                                    OPTAB_LIB_WIDEN);
        }
    }

  return gen_lowpart (mode, rem_flag ? remainder : quotient);
}
/* Return a tree node with data type TYPE, describing the value of X.
   Usually this is a VAR_DECL, if there is no obvious better choice.
   X may be an expression, however we only support those expressions
   generated by loop.c.  */
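
/* Illustrative note (added commentary, not from the original source):
   e.g. given a hypothetical register rtx R, passing
     gen_rtx_PLUS (SImode, R, GEN_INT (4))
   together with a 32-bit integer TYPE yields the corresponding folded
   PLUS_EXPR tree, recursing through the cases below; an rtx with no
   tree analogue falls back to a bare VAR_DECL whose rtl is X itself.  */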
tree
make_tree (tree type, rtx x)
{
  tree t;

  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_WIDE_INT:
      t = wide_int_to_tree (type, rtx_mode_t (x, TYPE_MODE (type)));
      return t;

    case CONST_DOUBLE:
      STATIC_ASSERT (HOST_BITS_PER_WIDE_INT * 2 <= MAX_BITSIZE_MODE_ANY_INT);
      if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
        t = wide_int_to_tree (type,
                              wide_int::from_array (&CONST_DOUBLE_LOW (x), 2,
                                                    HOST_BITS_PER_WIDE_INT * 2));
      else
        t = build_real (type, *CONST_DOUBLE_REAL_VALUE (x));

      return t;

    case CONST_VECTOR:
      {
        int units = CONST_VECTOR_NUNITS (x);
        tree itype = TREE_TYPE (type);
        int i;

        /* Build a tree with vector elements.  */
        auto_vec<tree, 32> elts (units);
        for (i = 0; i < units; ++i)
          {
            rtx elt = CONST_VECTOR_ELT (x, i);
            elts.quick_push (make_tree (itype, elt));
          }

        return build_vector (type, elts);
      }

    case PLUS:
      return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case MINUS:
      return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case NEG:
      return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));

    case MULT:
      return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case ASHIFT:
      return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case LSHIFTRT:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (type, XEXP (x, 1))));

    case ASHIFTRT:
      t = signed_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (type, XEXP (x, 1))));

    case DIV:
      if (TREE_CODE (type) != REAL_TYPE)
        t = signed_type_for (type);
      else
        t = type;

      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (t, XEXP (x, 1))));

    case UDIV:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (t, XEXP (x, 1))));

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
                                          GET_CODE (x) == ZERO_EXTEND);
      return fold_convert (type, make_tree (t, XEXP (x, 0)));

    case CONST:
      {
        rtx op = XEXP (x, 0);
        if (GET_CODE (op) == VEC_DUPLICATE)
          {
            tree elt_tree = make_tree (TREE_TYPE (type), XEXP (op, 0));
            return build_vector_from_val (type, elt_tree);
          }
        return make_tree (type, op);
      }

    case SYMBOL_REF:
      t = SYMBOL_REF_DECL (x);
      if (t)
        return fold_convert (type, build_fold_addr_expr (t));
      /* fall through.  */

    default:
      t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);

      /* If TYPE is a POINTER_TYPE, we might need to convert X from
         address mode to pointer mode.  */
      if (POINTER_TYPE_P (type))
        x = convert_memory_address_addr_space
              (SCALAR_INT_TYPE_MODE (type), x,
               TYPE_ADDR_SPACE (TREE_TYPE (type)));

      /* Note that we do *not* use SET_DECL_RTL here, because we do not
         want set_decl_rtl to go adjusting REG_ATTRS for this temporary.  */
      t->decl_with_rtl.rtl = x;

      return t;
    }
}
/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */
rtx
expand_and (machine_mode mode, rtx op0, rtx op1, rtx target)
{
  rtx tem = 0;

  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
    tem = simplify_binary_operation (AND, mode, op0, op1);
  if (tem == 0)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}
/* Helper function for emit_store_flag.  */
static rtx
emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
             machine_mode mode, machine_mode compare_mode,
             int unsignedp, rtx x, rtx y, int normalizep,
             machine_mode target_mode)
{
  struct expand_operand ops[4];
  rtx op0, comparison, subtarget;
  rtx_insn *last;
  scalar_int_mode result_mode = targetm.cstore_mode (icode);
  scalar_int_mode int_target_mode;

  last = get_last_insn ();
  x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
  y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
  if (!x || !y)
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  if (target_mode == VOIDmode)
    int_target_mode = result_mode;
  else
    int_target_mode = as_a <scalar_int_mode> (target_mode);
  if (!target)
    target = gen_reg_rtx (int_target_mode);

  comparison = gen_rtx_fmt_ee (code, result_mode, x, y);

  create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], x);
  create_fixed_operand (&ops[3], y);
  if (!maybe_expand_insn (icode, 4, ops))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }
  subtarget = ops[0].value;

  /* If we are converting to a wider mode, first convert to
     INT_TARGET_MODE, then normalize.  This produces better combining
     opportunities on machines that have a SIGN_EXTRACT when we are
     testing a single bit.  This mostly benefits the 68k.

     If STORE_FLAG_VALUE does not have the sign bit set when
     interpreted in MODE, we can do this conversion as unsigned, which
     is usually more efficient.  */
  if (GET_MODE_SIZE (int_target_mode) > GET_MODE_SIZE (result_mode))
    {
      convert_move (target, subtarget,
                    val_signbit_known_clear_p (result_mode,
                                               STORE_FLAG_VALUE));
      op0 = target;
      result_mode = int_target_mode;
    }
  else
    op0 = subtarget;

  /* If we want to keep subexpressions around, don't reuse our last
     target.  */
  if (optimize)
    subtarget = 0;

  /* Now normalize to the proper value in MODE.  Sometimes we don't
     have to do anything.  */
  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
    ;
  /* STORE_FLAG_VALUE might be the most negative number, so write
     the comparison this way to avoid a compiler-time warning.  */
  else if (- normalizep == STORE_FLAG_VALUE)
    op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);

  /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
     it hard to use a value of just the sign bit due to ANSI integer
     constant typing rules.  */
  else if (val_signbit_known_set_p (result_mode, STORE_FLAG_VALUE))
    op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
                        GET_MODE_BITSIZE (result_mode) - 1, subtarget,
                        normalizep == 1);
  else
    {
      gcc_assert (STORE_FLAG_VALUE & 1);

      op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
      if (normalizep == -1)
        op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
    }

  /* If we were converting to a smaller mode, do the conversion now.  */
  if (int_target_mode != result_mode)
    {
      convert_move (target, op0, 0);
      return target;
    }
  else
    return op0;
}
/* A subroutine of emit_store_flag only including "tricks" that do not
   need a recursive call.  These are kept separate to avoid infinite
   loops.  */

static rtx
emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
                   machine_mode mode, int unsignedp, int normalizep,
                   machine_mode target_mode)
{
  rtx subtarget;
  enum insn_code icode;
  machine_mode compare_mode;
  enum mode_class mclass;
  enum rtx_code scode;

  if (unsignedp)
    code = unsigned_condition (code);
  scode = swap_condition (code);

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */
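
  /* Illustrative note (added commentary, not from the original source):
     the rewrites below use, for signed operands, x < 1 <=> x <= 0 and
     x >= 1 <=> x > 0, and for unsigned operands x >=u 1 <=> x != 0 and
     x <u 1 <=> x == 0, all of which replace a comparison against a
     constant by one against zero.  */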
  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }

  /* If we are comparing a double-word integer with zero or -1, we can
     convert the comparison into one involving a single word.  */
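
  /* Illustrative note (added commentary, not from the original source):
     a double-word value is zero iff the inclusive OR of its two words
     is zero, and is -1 iff the AND of its two words is -1; and its sign
     lives entirely in the high word, which is why LT/GE against zero
     can be tested on the high word alone.  */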
  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode)
      && GET_MODE_BITSIZE (int_mode) == BITS_PER_WORD * 2
      && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
    {
      rtx tem;
      if ((code == EQ || code == NE)
          && (op1 == const0_rtx || op1 == constm1_rtx))
        {
          rtx op00, op01;

          /* Do a logical OR or AND of the two words and compare the
             result.  */
          op00 = simplify_gen_subreg (word_mode, op0, int_mode, 0);
          op01 = simplify_gen_subreg (word_mode, op0, int_mode,
                                      UNITS_PER_WORD);
          tem = expand_binop (word_mode,
                              op1 == const0_rtx ? ior_optab : and_optab,
                              op00, op01, NULL_RTX, unsignedp,
                              OPTAB_DIRECT);

          if (tem != 0)
            tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
                                   unsignedp, normalizep);
        }
      else if ((code == LT || code == GE) && op1 == const0_rtx)
        {
          rtx op0h;

          /* If testing the sign bit, can just test on high word.  */
          op0h = simplify_gen_subreg (word_mode, op0, int_mode,
                                      subreg_highpart_offset (word_mode,
                                                              int_mode));
          tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
                                 unsignedp, normalizep);
        }
      else
        tem = NULL_RTX;

      if (tem)
        {
          if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
            return tem;
          if (!target)
            target = gen_reg_rtx (target_mode);

          convert_move (target, tem,
                        !val_signbit_known_set_p (word_mode,
                                                  (normalizep ? normalizep
                                                   : STORE_FLAG_VALUE)));
          return target;
        }
    }

  /* If this is A < 0 or A >= 0, we can do this by taking the ones
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && is_int_mode (mode, &int_mode)
      && (normalizep || STORE_FLAG_VALUE == 1
          || val_signbit_p (int_mode, STORE_FLAG_VALUE)))
    {
      scalar_int_mode int_target_mode;
      subtarget = target;

      if (!target)
        int_target_mode = int_mode;
      else
        {
          /* If the result is to be wider than OP0, it is best to convert it
             first.  If it is to be narrower, it is *incorrect* to convert it
             first.  */
          int_target_mode = as_a <scalar_int_mode> (target_mode);
          if (GET_MODE_SIZE (int_target_mode) > GET_MODE_SIZE (int_mode))
            {
              op0 = convert_modes (int_target_mode, int_mode, op0, 0);
              int_mode = int_target_mode;
            }
        }

      if (int_target_mode != int_mode)
        subtarget = 0;

      if (code == GE)
        op0 = expand_unop (int_mode, one_cmpl_optab, op0,
                           ((STORE_FLAG_VALUE == 1 || normalizep)
                            ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
        /* If we are supposed to produce a 0/1 value, we want to do
           a logical shift from the sign bit to the low-order bit; for
           a -1/0 value, we do an arithmetic shift.  */
        op0 = expand_shift (RSHIFT_EXPR, int_mode, op0,
                            GET_MODE_BITSIZE (int_mode) - 1,
                            subtarget, normalizep != -1);

      if (int_mode != int_target_mode)
        op0 = convert_modes (int_target_mode, int_mode, op0, 0);

      return op0;
    }

  mclass = GET_MODE_CLASS (mode);
  FOR_EACH_MODE_FROM (compare_mode, mode)
    {
      machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
      icode = optab_handler (cstore_optab, optab_mode);
      if (icode != CODE_FOR_nothing)
        {
          do_pending_stack_adjust ();
          rtx tem = emit_cstore (target, icode, code, mode, compare_mode,
                                 unsignedp, op0, op1, normalizep, target_mode);
          if (tem)
            return tem;

          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              tem = emit_cstore (target, icode, scode, mode, compare_mode,
                                 unsignedp, op1, op0, normalizep, target_mode);
              if (tem)
                return tem;
            }
          break;
        }
    }

  return 0;
}
/* Subroutine of emit_store_flag that handles cases in which the operands
   are scalar integers.  SUBTARGET is the target to use for temporary
   operations and TRUEVAL is the value to store when the condition is
   true.  All other arguments are as for emit_store_flag.  */
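
/* Illustrative note (added commentary, not from the original source):
   the first strategy below rests on x == y <=> (x ^ y) == 0 (or on
   (x - y) == 0 when no XOR pattern exists), which lets a single store
   flag against zero serve for any equality comparison.  */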
5607 emit_store_flag_int (rtx target
, rtx subtarget
, enum rtx_code code
, rtx op0
,
5608 rtx op1
, scalar_int_mode mode
, int unsignedp
,
5609 int normalizep
, rtx trueval
)
5611 machine_mode target_mode
= target
? GET_MODE (target
) : VOIDmode
;
5612 rtx_insn
*last
= get_last_insn ();
5614 /* If this is an equality comparison of integers, we can try to exclusive-or
5615 (or subtract) the two operands and use a recursive call to try the
5616 comparison with zero. Don't do any of these cases if branches are
5619 if ((code
== EQ
|| code
== NE
) && op1
!= const0_rtx
)
5621 rtx tem
= expand_binop (mode
, xor_optab
, op0
, op1
, subtarget
, 1,
5625 tem
= expand_binop (mode
, sub_optab
, op0
, op1
, subtarget
, 1,
5628 tem
= emit_store_flag (target
, code
, tem
, const0_rtx
,
5629 mode
, unsignedp
, normalizep
);
5633 delete_insns_since (last
);
  /* For integer comparisons, try the reverse comparison.  However, for
     small X and if we'd have anyway to extend, implementing "X != 0"
     as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0".  */
  rtx_code rcode = reverse_condition (code);
  if (can_compare_p (rcode, mode, ccp_store_flag)
      && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
	    && code == NE
	    && GET_MODE_SIZE (mode) < UNITS_PER_WORD
	    && op1 == const0_rtx))
    {
      int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
		      || (STORE_FLAG_VALUE == -1 && normalizep == 1));

      /* Again, for the reverse comparison, use either an addition or a XOR.  */
      if (want_add
	  && rtx_cost (GEN_INT (normalizep), mode, PLUS, 1,
		       optimize_insn_for_speed_p ()) == 0)
	{
	  rtx tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       STORE_FLAG_VALUE, target_mode);
	  if (tem != 0)
	    tem = expand_binop (target_mode, add_optab, tem,
				gen_int_mode (normalizep, target_mode),
				target, 0, OPTAB_WIDEN);
	  if (tem != 0)
	    return tem;
	}
      else if (!want_add
	       && rtx_cost (trueval, mode, XOR, 1,
			    optimize_insn_for_speed_p ()) == 0)
	{
	  rtx tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       normalizep, target_mode);
	  if (tem != 0)
	    tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
				INTVAL (trueval) >= 0, OPTAB_WIDEN);
	  if (tem != 0)
	    return tem;
	}

      delete_insns_since (last);
    }
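  /* Illustration (editor's note, not original GCC text): for a value
     zero-extended from a narrow mode the widened X is never negative,
     so "-(int)X >> 31" computes the flag directly: with

	unsigned char c;  int x = c;

     x is always >= 0, and

	int ne = -x >> 31;

     is -1 when c is nonzero and 0 when c is zero, since -x is negative
     exactly when c != 0.  */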
  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (op1 != const0_rtx
      || (code != EQ && code != NE
	  && (BRANCH_COST (optimize_insn_for_speed_p (),
			   false) <= 1 || (code != LE && code != GT))))
    return 0;

  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  rtx tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
			  OPTAB_WIDEN);
      if (tem)
	tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
			    OPTAB_WIDEN);
    }
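  /* Worked example (editor's note, not original GCC text) for 32-bit A:

	A = 0:   0x00000000 | 0xffffffff  ->  sign bit set
	A = 5:   0x00000005 | 0x00000004  ->  sign bit clear
	A = -3:  A already has the sign bit set, and IOR preserves it

     so the sign bit of (A | (A - 1)) is exactly the A <= 0 flag.  */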
  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = maybe_expand_shift (RSHIFT_EXPR, mode, op0,
				GET_MODE_BITSIZE (mode) - 1,
				subtarget, 0);
      if (tem)
	tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
			    OPTAB_WIDEN);
    }
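  /* Worked example (editor's note, not original GCC text) for 32-bit A,
     with an arithmetic shift:

	A > 0:   (A >> 31) - A  =   0 - A   ->  negative, sign bit set
	A = 0:   (A >> 31) - A  =   0 - 0   ->  zero, sign bit clear
	A < 0:   (A >> 31) - A  =  -1 - A   ->  >= 0, sign bit clear

     so the sign bit of ((A >> 31) - A) is exactly the A > 0 flag,
     including A == INT_MIN, where -1 - A is INT_MAX.  */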
  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
	 that converts the operand into a positive number if it is nonzero
	 or zero if it was originally zero.  Then, for EQ, we subtract 1 and
	 for NE we negate.  This puts the result in the sign bit.  Then we
	 normalize with a shift, if needed.

	 Two operations that can do the above actions are ABS and FFS, so try
	 them.  If that doesn't work, and MODE is smaller than a full word,
	 we can use zero-extension to the wider mode (an unsigned conversion)
	 as the operation.  */

      /* Note that ABS doesn't yield a positive number for INT_MIN, but
	 that is compensated by the subsequent overflow when subtracting
	 one / negating.  */

      if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
	tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
	tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
	{
	  tem = convert_modes (word_mode, mode, op0, 1);
	  mode = word_mode;
	}

      if (tem != 0)
	{
	  if (code == EQ)
	    tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
				0, OPTAB_WIDEN);
	  else
	    tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
	}

      /* If we couldn't do it that way, for NE we can "or" the two's complement
	 of the value with itself.  For EQ, we take the one's complement of
	 that "or", which is an extra insn, so we only handle EQ if branches
	 are expensive.  */

      if (tem == 0
	  && (code == NE
	      || BRANCH_COST (optimize_insn_for_speed_p (),
			      false) > 1))
	{
	  if (rtx_equal_p (subtarget, op0))
	    subtarget = 0;

	  tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
	  tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
			      OPTAB_WIDEN);

	  if (tem && code == EQ)
	    tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
	}
    }
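  /* Worked example (editor's note, not original GCC text) with ABS as
     the "make positive" operation on 32-bit A:

	NE:  -abs (A)     is negative iff A != 0 (INT_MIN negates to itself)
	EQ:  abs (A) - 1  is negative iff A == 0 (abs (INT_MIN) - 1 wraps
	     to INT_MAX, so the sign bit is still clear)

     and the IOR fallback uses the fact that for nonzero A at least one
     of A and -A has the sign bit set:

	ne:  (-A | A) < 0
	eq:  ~(-A | A) < 0          (the extra one's-complement insn)  */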
  if (tem && normalizep)
    tem = maybe_expand_shift (RSHIFT_EXPR, mode, tem,
			      GET_MODE_BITSIZE (mode) - 1,
			      subtarget, normalizep == 1);

  if (tem)
    {
      if (!target)
	;
      else if (GET_MODE (tem) != target_mode)
	{
	  convert_move (target, tem, 0);
	  tem = target;
	}
      else if (!subtarget)
	{
	  emit_move_insn (target, tem);
	  tem = target;
	}
    }
  else
    delete_insns_since (last);

  return tem;
}
/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  NORMALIZEP is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */

rtx
emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
		 machine_mode mode, int unsignedp, int normalizep)
{
  machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
  enum rtx_code rcode;
  rtx subtarget;
  rtx tem, trueval;
  rtx_insn *last;

  /* If we compare constants, we shouldn't use a store-flag operation,
     but a constant load.  We can get there via the vanilla route that
     usually generates a compare-branch sequence, but will in this case
     fold the comparison to a constant, and thus elide the branch.  */
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    return NULL_RTX;

  tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
			   target_mode);
  if (tem)
    return tem;

  /* If we reached here, we can't do this with a scc insn, however there
     are some comparisons that can be done in other ways.  Don't do any
     of these cases if branches are very cheap.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
    return 0;

  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	normalizep = STORE_FLAG_VALUE;

      else if (val_signbit_p (mode, STORE_FLAG_VALUE))
	;
      else
	return 0;
    }

  last = get_last_insn ();

  /* If optimizing, use different pseudo registers for each insn, instead
     of reusing the same pseudo.  This leads to better CSE, but slows
     down the compiler, since there are more pseudos.  */
  subtarget = (!optimize
	       && (target_mode == mode)) ? target : NULL_RTX;
  trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);
  /* For floating-point comparisons, try the reverse comparison or try
     changing the "orderedness" of the comparison.  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      enum rtx_code first_code;
      bool and_them;

      rcode = reverse_condition_maybe_unordered (code);
      if (can_compare_p (rcode, mode, ccp_store_flag)
	  && (code == ORDERED || code == UNORDERED
	      || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
	      || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
	{
	  int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
			  || (STORE_FLAG_VALUE == -1 && normalizep == 1));

	  /* For the reverse comparison, use either an addition or a XOR.  */
	  if (want_add
	      && rtx_cost (GEN_INT (normalizep), mode, PLUS, 1,
			   optimize_insn_for_speed_p ()) == 0)
	    {
	      tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       STORE_FLAG_VALUE, target_mode);
	      if (tem)
		return expand_binop (target_mode, add_optab, tem,
				     gen_int_mode (normalizep, target_mode),
				     target, 0, OPTAB_WIDEN);
	    }
	  else if (!want_add
		   && rtx_cost (trueval, mode, XOR, 1,
				optimize_insn_for_speed_p ()) == 0)
	    {
	      tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       normalizep, target_mode);
	      if (tem)
		return expand_binop (target_mode, xor_optab, tem, trueval,
				     target, INTVAL (trueval) >= 0,
				     OPTAB_WIDEN);
	    }
	}

      delete_insns_since (last);

      /* Cannot split ORDERED and UNORDERED, only try the above trick.  */
      if (code == ORDERED || code == UNORDERED)
	return 0;

      and_them = split_comparison (code, mode, &first_code, &code);

      /* If there are no NaNs, the first comparison should always fall through.
	 Effectively change the comparison to the other one.  */
      if (!HONOR_NANS (mode))
	{
	  gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
	  return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
				    target_mode);
	}

      if (!HAVE_conditional_move)
	return 0;

      /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
	 conditional move.  */
      tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
			       normalizep, target_mode);
      if (tem == 0)
	return 0;

      if (and_them)
	tem = emit_conditional_move (target, code, op0, op1, mode,
				     tem, const0_rtx, GET_MODE (tem), 0);
      else
	tem = emit_conditional_move (target, code, op0, op1, mode,
				     trueval, tem, GET_MODE (tem), 0);

      if (tem == 0)
	delete_insns_since (last);
      return tem;
    }
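  /* Illustration (editor's note, not original GCC text): UNEQ, IEEE
     "unordered or equal", splits as

	UNEQ (a, b)  =  UNORDERED (a, b) || EQ (a, b)

     so the setcc above computes the UNORDERED half and the conditional
     move substitutes TRUEVAL when the EQ half holds; an AND-style split
     such as LTGT = ORDERED && NE takes the other emit_conditional_move
     arm, substituting zero when the NE half fails.  */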
  /* The remaining tricks only apply to integer comparisons.  */

  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode))
    return emit_store_flag_int (target, subtarget, code, op0, op1, int_mode,
				unsignedp, normalizep, trueval);

  return 0;
}
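/* Usage sketch (editor's note, not original GCC text): a caller wanting
   a normalized 0/1 SImode flag for a signed "x < y" would do roughly

     rtx flag = emit_store_flag (gen_reg_rtx (SImode), LT, x, y,
				 SImode, 0, 1);

   and must be prepared for a zero return, falling back to a branchy
   sequence such as emit_store_flag_force below, which never fails.  */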
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       machine_mode mode, int unsignedp, int normalizep)
{
  rtx tem;
  rtx_code_label *label;
  rtx trueval, falseval;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (!target)
    target = gen_reg_rtx (word_mode);

  /* If this failed, we have to do this with set/compare/jump/set code.
     For foo != 0, if foo is in OP0, just replace it with 1 if nonzero.  */
  trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
  if (code == NE
      && GET_MODE_CLASS (mode) == MODE_INT
      && REG_P (target)
      && op0 == target
      && op1 == const0_rtx)
    {
      label = gen_label_rtx ();
      do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp, mode,
			       NULL_RTX, NULL, label,
			       profile_probability::uninitialized ());
      emit_move_insn (target, trueval);
      emit_label (label);
      return target;
    }

  if (!REG_P (target)
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  /* Jump in the right direction if the target cannot implement CODE
     but can jump on its reverse condition.  */
  falseval = const0_rtx;
  if (! can_compare_p (code, mode, ccp_jump)
      && (! FLOAT_MODE_P (mode)
	  || code == ORDERED || code == UNORDERED
	  || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
	  || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
    {
      enum rtx_code rcode;
      if (FLOAT_MODE_P (mode))
	rcode = reverse_condition_maybe_unordered (code);
      else
	rcode = reverse_condition (code);

      /* Canonicalize to UNORDERED for the libcall.  */
      if (can_compare_p (rcode, mode, ccp_jump)
	  || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
	{
	  falseval = trueval;
	  trueval = const0_rtx;
	  code = rcode;
	}
    }

  emit_move_insn (target, trueval);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, NULL,
			   label, profile_probability::uninitialized ());

  emit_move_insn (target, falseval);
  emit_label (label);

  return target;
}
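/* Illustration (editor's note, not original GCC text): the fallback
   above emits the classic set/compare/jump/set shape, roughly

     target = trueval;
     if (op0 CODE op1) goto label;
     target = falseval;
   label:

   with TRUEVAL and FALSEVAL swapped beforehand when only the reverse
   condition can be jumped on directly.  */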
/* Perform possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE.  This is
   now a thin wrapper around do_compare_rtx_and_jump.  */

static void
do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, machine_mode mode,
		 rtx_code_label *label)
{
  int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
  do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode, NULL_RTX,
			   NULL, label, profile_probability::uninitialized ());
}