/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tree.h"
#include "stor-layout.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "langhooks.h"
#include "target.h"
struct target_expmed default_target_expmed;
struct target_expmed *this_target_expmed = &default_target_expmed;
static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
				   unsigned HOST_WIDE_INT,
				   unsigned HOST_WIDE_INT,
				   unsigned HOST_WIDE_INT,
				   rtx);
static void store_fixed_bit_field_1 (rtx, unsigned HOST_WIDE_INT,
				     unsigned HOST_WIDE_INT,
				     rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
				   unsigned HOST_WIDE_INT,
				   unsigned HOST_WIDE_INT,
				   unsigned HOST_WIDE_INT,
				   rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
				    unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT, rtx, int);
static rtx extract_fixed_bit_field_1 (enum machine_mode, rtx,
				      unsigned HOST_WIDE_INT,
				      unsigned HOST_WIDE_INT, rtx, int);
static rtx lshift_value (enum machine_mode, unsigned HOST_WIDE_INT, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
/* Return a constant integer mask value of mode MODE with BITSIZE ones
   followed by BITPOS zeros, or the complement of that if COMPLEMENT.
   The mask is truncated if necessary to the width of mode MODE.  The
   mask is zero-extended if BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, bool complement)
{
  return immed_wide_int_const
    (wi::shifted_mask (bitpos, bitsize, complement,
		       GET_MODE_PRECISION (mode)), mode);
}
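
/* Worked example (added for illustration; not in the original source):
   assuming a 32-bit SImode, mask_rtx (SImode, 4, 8, false) produces the
   constant 0x00000ff0 -- eight ones preceded by four zeros at the lsb --
   and the complement form produces 0xfffff00f.  */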
/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) \
  (((x) & ((x) - (unsigned HOST_WIDE_INT) 1)) == 0)
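
/* Illustrative note (added): x & (x - 1) clears the lowest set bit, so the
   result is zero exactly when x has at most one bit set.  For example,
   (8 & 7) == 0 accepts 8, while (12 & 11) == 8 rejects 12.  */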
struct init_expmed_rtl
{
  rtx reg;
  rtx plus;
  rtx neg;
  rtx mult;
  rtx sdiv;
  rtx udiv;
  rtx sdiv_32;
  rtx smod_32;
  rtx wide_mult;
  rtx wide_lshr;
  rtx wide_trunc;
  rtx shift;
  rtx shift_mult;
  rtx shift_add;
  rtx shift_sub0;
  rtx shift_sub1;
  rtx zext;
  rtx trunc;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
};
static void
init_expmed_one_conv (struct init_expmed_rtl *all, enum machine_mode to_mode,
		      enum machine_mode from_mode, bool speed)
{
  int to_size, from_size;
  rtx which;

  /* We're given no information about the true size of a partial integer,
     only the size of the "full" integer it requires for storage.  For
     comparison purposes here, reduce the bit size by one in that case.  */
  to_size = (GET_MODE_BITSIZE (to_mode)
	     - (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT));
  from_size = (GET_MODE_BITSIZE (from_mode)
	       - (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT));

  /* Assume cost of zero-extend and sign-extend is the same.  */
  which = (to_size < from_size ? all->trunc : all->zext);

  PUT_MODE (all->reg, from_mode);
  set_convert_cost (to_mode, from_mode, speed, set_src_cost (which, speed));
}
static void
init_expmed_one_mode (struct init_expmed_rtl *all,
		      enum machine_mode mode, int speed)
{
  int m, n, mode_bitsize;
  enum machine_mode mode_from;

  mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);

  PUT_MODE (all->reg, mode);
  PUT_MODE (all->plus, mode);
  PUT_MODE (all->neg, mode);
  PUT_MODE (all->mult, mode);
  PUT_MODE (all->sdiv, mode);
  PUT_MODE (all->udiv, mode);
  PUT_MODE (all->sdiv_32, mode);
  PUT_MODE (all->smod_32, mode);
  PUT_MODE (all->wide_trunc, mode);
  PUT_MODE (all->shift, mode);
  PUT_MODE (all->shift_mult, mode);
  PUT_MODE (all->shift_add, mode);
  PUT_MODE (all->shift_sub0, mode);
  PUT_MODE (all->shift_sub1, mode);
  PUT_MODE (all->zext, mode);
  PUT_MODE (all->trunc, mode);

  set_add_cost (speed, mode, set_src_cost (all->plus, speed));
  set_neg_cost (speed, mode, set_src_cost (all->neg, speed));
  set_mul_cost (speed, mode, set_src_cost (all->mult, speed));
  set_sdiv_cost (speed, mode, set_src_cost (all->sdiv, speed));
  set_udiv_cost (speed, mode, set_src_cost (all->udiv, speed));

  set_sdiv_pow2_cheap (speed, mode, (set_src_cost (all->sdiv_32, speed)
				     <= 2 * add_cost (speed, mode)));
  set_smod_pow2_cheap (speed, mode, (set_src_cost (all->smod_32, speed)
				     <= 4 * add_cost (speed, mode)));

  set_shift_cost (speed, mode, 0, 0);
  {
    int cost = add_cost (speed, mode);
    set_shiftadd_cost (speed, mode, 0, cost);
    set_shiftsub0_cost (speed, mode, 0, cost);
    set_shiftsub1_cost (speed, mode, 0, cost);
  }

  n = MIN (MAX_BITS_PER_WORD, mode_bitsize);
  for (m = 1; m < n; m++)
    {
      XEXP (all->shift, 1) = all->cint[m];
      XEXP (all->shift_mult, 1) = all->pow2[m];

      set_shift_cost (speed, mode, m, set_src_cost (all->shift, speed));
      set_shiftadd_cost (speed, mode, m,
			 set_src_cost (all->shift_add, speed));
      set_shiftsub0_cost (speed, mode, m,
			  set_src_cost (all->shift_sub0, speed));
      set_shiftsub1_cost (speed, mode, m,
			  set_src_cost (all->shift_sub1, speed));
    }

  if (SCALAR_INT_MODE_P (mode))
    {
      for (mode_from = MIN_MODE_INT; mode_from <= MAX_MODE_INT;
	   mode_from = (enum machine_mode)(mode_from + 1))
	init_expmed_one_conv (all, mode, mode_from, speed);
    }
  if (GET_MODE_CLASS (mode) == MODE_INT)
    {
      enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
	{
	  PUT_MODE (all->zext, wider_mode);
	  PUT_MODE (all->wide_mult, wider_mode);
	  PUT_MODE (all->wide_lshr, wider_mode);
	  XEXP (all->wide_lshr, 1) = GEN_INT (mode_bitsize);

	  set_mul_widen_cost (speed, wider_mode,
			      set_src_cost (all->wide_mult, speed));
	  set_mul_highpart_cost (speed, mode,
				 set_src_cost (all->wide_trunc, speed));
	}
    }
}
void
init_expmed (void)
{
  struct init_expmed_rtl all;
  enum machine_mode mode = QImode;
  int m, speed;

  memset (&all, 0, sizeof all);
  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      all.pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      all.cint[m] = GEN_INT (m);
    }

  /* Avoid using hard regs in ways which may be unsupported.  */
  all.reg = gen_rtx_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
  all.plus = gen_rtx_PLUS (mode, all.reg, all.reg);
  all.neg = gen_rtx_NEG (mode, all.reg);
  all.mult = gen_rtx_MULT (mode, all.reg, all.reg);
  all.sdiv = gen_rtx_DIV (mode, all.reg, all.reg);
  all.udiv = gen_rtx_UDIV (mode, all.reg, all.reg);
  all.sdiv_32 = gen_rtx_DIV (mode, all.reg, all.pow2[5]);
  all.smod_32 = gen_rtx_MOD (mode, all.reg, all.pow2[5]);
  all.zext = gen_rtx_ZERO_EXTEND (mode, all.reg);
  all.wide_mult = gen_rtx_MULT (mode, all.zext, all.zext);
  all.wide_lshr = gen_rtx_LSHIFTRT (mode, all.wide_mult, all.reg);
  all.wide_trunc = gen_rtx_TRUNCATE (mode, all.wide_lshr);
  all.shift = gen_rtx_ASHIFT (mode, all.reg, all.reg);
  all.shift_mult = gen_rtx_MULT (mode, all.reg, all.reg);
  all.shift_add = gen_rtx_PLUS (mode, all.shift_mult, all.reg);
  all.shift_sub0 = gen_rtx_MINUS (mode, all.shift_mult, all.reg);
  all.shift_sub1 = gen_rtx_MINUS (mode, all.reg, all.shift_mult);
  all.trunc = gen_rtx_TRUNCATE (mode, all.reg);

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      set_zero_cost (speed, set_src_cost (const0_rtx, speed));

      for (mode = MIN_MODE_INT; mode <= MAX_MODE_INT;
	   mode = (enum machine_mode)(mode + 1))
	init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_PARTIAL_INT != VOIDmode)
	for (mode = MIN_MODE_PARTIAL_INT; mode <= MAX_MODE_PARTIAL_INT;
	     mode = (enum machine_mode)(mode + 1))
	  init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_VECTOR_INT != VOIDmode)
	for (mode = MIN_MODE_VECTOR_INT; mode <= MAX_MODE_VECTOR_INT;
	     mode = (enum machine_mode)(mode + 1))
	  init_expmed_one_mode (&all, mode, speed);
    }

  if (alg_hash_used_p ())
    {
      struct alg_hash_entry *p = alg_hash_entry_ptr (0);
      memset (p, 0, sizeof (*p) * NUM_ALG_HASH_ENTRIES);
    }
  else
    set_alg_hash_used_p (true);
  default_rtl_profile ();

  ggc_free (all.trunc);
  ggc_free (all.shift_sub1);
  ggc_free (all.shift_sub0);
  ggc_free (all.shift_add);
  ggc_free (all.shift_mult);
  ggc_free (all.shift);
  ggc_free (all.wide_trunc);
  ggc_free (all.wide_lshr);
  ggc_free (all.wide_mult);
  ggc_free (all.smod_32);
  ggc_free (all.sdiv_32);
  ggc_free (all.udiv);
  ggc_free (all.sdiv);
  ggc_free (all.mult);
  ggc_free (all.neg);
  ggc_free (all.plus);
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Adjust bitfield memory MEM so that it points to the first unit of mode
   MODE that contains a bitfield of size BITSIZE at bit position BITNUM.
   If MODE is BLKmode, return a reference to every byte in the bitfield.
   Set *NEW_BITNUM to the bit position of the field within the new memory.  */

static rtx
narrow_bit_field_mem (rtx mem, enum machine_mode mode,
		      unsigned HOST_WIDE_INT bitsize,
		      unsigned HOST_WIDE_INT bitnum,
		      unsigned HOST_WIDE_INT *new_bitnum)
{
  if (mode == BLKmode)
    {
      *new_bitnum = bitnum % BITS_PER_UNIT;
      HOST_WIDE_INT offset = bitnum / BITS_PER_UNIT;
      HOST_WIDE_INT size = ((*new_bitnum + bitsize + BITS_PER_UNIT - 1)
			    / BITS_PER_UNIT);
      return adjust_bitfield_address_size (mem, mode, offset, size);
    }
  else
    {
      unsigned int unit = GET_MODE_BITSIZE (mode);
      *new_bitnum = bitnum % unit;
      HOST_WIDE_INT offset = (bitnum - *new_bitnum) / BITS_PER_UNIT;
      return adjust_bitfield_address (mem, mode, offset);
    }
}
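
/* Worked example (added): narrowing to HImode (16 bits) with BITNUM == 37
   gives *NEW_BITNUM = 37 % 16 = 5 and a byte offset of (37 - 5) / 8 = 4,
   so the returned memory starts at the third 16-bit unit and the field
   begins 5 bits into it.  */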
/* The caller wants to perform insertion or extraction PATTERN on a
   bitfield of size BITSIZE at BITNUM bits into memory operand OP0.
   BITREGION_START and BITREGION_END are as for store_bit_field
   and FIELDMODE is the natural mode of the field.

   Search for a mode that is compatible with the memory access
   restrictions and (where applicable) with a register insertion or
   extraction.  Return the new memory on success, storing the adjusted
   bit position in *NEW_BITNUM.  Return null otherwise.  */

static rtx
adjust_bit_field_mem_for_reg (enum extraction_pattern pattern,
			      rtx op0, HOST_WIDE_INT bitsize,
			      HOST_WIDE_INT bitnum,
			      unsigned HOST_WIDE_INT bitregion_start,
			      unsigned HOST_WIDE_INT bitregion_end,
			      enum machine_mode fieldmode,
			      unsigned HOST_WIDE_INT *new_bitnum)
{
  bit_field_mode_iterator iter (bitsize, bitnum, bitregion_start,
				bitregion_end, MEM_ALIGN (op0),
				MEM_VOLATILE_P (op0));
  enum machine_mode best_mode;
  if (iter.next_mode (&best_mode))
    {
      /* We can use a memory in BEST_MODE.  See whether this is true for
	 any wider modes.  All other things being equal, we prefer to
	 use the widest mode possible because it tends to expose more
	 CSE opportunities.  */
      if (!iter.prefer_smaller_modes ())
	{
	  /* Limit the search to the mode required by the corresponding
	     register insertion or extraction instruction, if any.  */
	  enum machine_mode limit_mode = word_mode;
	  extraction_insn insn;
	  if (get_best_reg_extraction_insn (&insn, pattern,
					    GET_MODE_BITSIZE (best_mode),
					    fieldmode))
	    limit_mode = insn.field_mode;

	  enum machine_mode wider_mode;
	  while (iter.next_mode (&wider_mode)
		 && GET_MODE_SIZE (wider_mode) <= GET_MODE_SIZE (limit_mode))
	    best_mode = wider_mode;
	}
      return narrow_bit_field_mem (op0, best_mode, bitsize, bitnum,
				   new_bitnum);
    }
  return NULL_RTX;
}
/* Return true if a bitfield of size BITSIZE at bit number BITNUM within
   a structure of mode STRUCT_MODE represents a lowpart subreg.  The subreg
   offset is then BITNUM / BITS_PER_UNIT.  */

static bool
lowpart_bit_field_p (unsigned HOST_WIDE_INT bitnum,
		     unsigned HOST_WIDE_INT bitsize,
		     enum machine_mode struct_mode)
{
  if (BYTES_BIG_ENDIAN)
    return (bitnum % BITS_PER_UNIT == 0
	    && (bitnum + bitsize == GET_MODE_BITSIZE (struct_mode)
		|| (bitnum + bitsize) % BITS_PER_WORD == 0));
  else
    return bitnum % BITS_PER_WORD == 0;
}
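
/* Illustration (added): on a little-endian target with 32-bit words, a
   field starting at bit 32 of a DImode register is a lowpart subreg
   (32 % BITS_PER_WORD == 0), whereas the same field starting at bit 8
   is not.  */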
/* Return true if -fstrict-volatile-bitfields applies to an access of OP0
   containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.
   Return false if the access would touch memory outside the range
   BITREGION_START to BITREGION_END for conformance to the C++ memory
   model.  */

static bool
strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
			    unsigned HOST_WIDE_INT bitnum,
			    enum machine_mode fieldmode,
			    unsigned HOST_WIDE_INT bitregion_start,
			    unsigned HOST_WIDE_INT bitregion_end)
{
  unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);

  /* -fstrict-volatile-bitfields must be enabled and we must have a
     volatile MEM.  */
  if (!MEM_P (op0)
      || !MEM_VOLATILE_P (op0)
      || flag_strict_volatile_bitfields <= 0)
    return false;

  /* Non-integral modes likely only happen with packed structures.
     Punt.  */
  if (!SCALAR_INT_MODE_P (fieldmode))
    return false;

  /* The bit size must not be larger than the field mode, and
     the field mode must not be larger than a word.  */
  if (bitsize > modesize || modesize > BITS_PER_WORD)
    return false;

  /* Check for cases of unaligned fields that must be split.  */
  if (bitnum % BITS_PER_UNIT + bitsize > modesize
      || (STRICT_ALIGNMENT
	  && bitnum % GET_MODE_ALIGNMENT (fieldmode) + bitsize > modesize))
    return false;

  /* Check for cases where the C++ memory model applies.  */
  if (bitregion_end != 0
      && (bitnum - bitnum % modesize < bitregion_start
	  || bitnum - bitnum % modesize + modesize > bitregion_end))
    return false;

  return true;
}
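
/* Worked example (added): with a 32-bit FIELDMODE, BITNUM == 40 and
   BITREGION_START == 48, the enclosing unit starts at bit
   40 - 40 % 32 == 32, which is below the region start, so the access
   would touch bits outside the region and the function returns false.  */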
/* Return true if OP is a memory and if a bitfield of size BITSIZE at
   bit number BITNUM can be treated as a simple value of mode MODE.  */

static bool
simple_mem_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitnum, enum machine_mode mode)
{
  return (MEM_P (op0)
	  && bitnum % BITS_PER_UNIT == 0
	  && bitsize == GET_MODE_BITSIZE (mode)
	  && (!SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
	      || (bitnum % GET_MODE_ALIGNMENT (mode) == 0
		  && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode))));
}
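
/* Illustration (added): a 32-bit field at a byte-aligned BITNUM that is
   also 32-bit aligned within a MEM of at least SImode alignment passes
   every clause, so the caller may use one plain SImode memory access.  */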
/* Try to use instruction INSV to store VALUE into a field of OP0.
   BITSIZE and BITNUM are as for store_bit_field.  */

static bool
store_bit_field_using_insv (const extraction_insn *insv, rtx op0,
			    unsigned HOST_WIDE_INT bitsize,
			    unsigned HOST_WIDE_INT bitnum,
			    rtx value)
{
  struct expand_operand ops[4];
  rtx value1;
  rtx xop0 = op0;
  rtx last = get_last_insn ();
  bool copy_back = false;

  enum machine_mode op_mode = insv->field_mode;
  unsigned int unit = GET_MODE_BITSIZE (op_mode);
  if (bitsize == 0 || bitsize > unit)
    return false;

  if (MEM_P (xop0))
    /* Get a reference to the first byte of the field.  */
    xop0 = narrow_bit_field_mem (xop0, insv->struct_mode, bitsize, bitnum,
				 &bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in OP_MODE.  */
      if (BYTES_BIG_ENDIAN)
	bitnum += unit - GET_MODE_BITSIZE (GET_MODE (op0));

      /* If xop0 is a register, we need it in OP_MODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
	xop0 = gen_lowpart_SUBREG (op_mode, xop0);
    }

  /* If the destination is a paradoxical subreg such that we need a
     truncate to the inner mode, perform the insertion on a temporary and
     truncate the result to the original destination.  Note that we can't
     just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
     X) 0)) is (reg:N X).  */
  if (GET_CODE (xop0) == SUBREG
      && REG_P (SUBREG_REG (xop0))
      && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (SUBREG_REG (xop0)),
					 op_mode))
    {
      rtx tem = gen_reg_rtx (op_mode);
      emit_move_insn (tem, xop0);
      xop0 = tem;
      copy_back = true;
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are inserting into.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;

  /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
  value1 = value;
  if (GET_MODE (value) != op_mode)
    {
      if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	{
	  /* Optimization: Don't bother really extending VALUE
	     if it has all the bits we will actually use.  However,
	     if we must narrow it, be sure we do it correctly.  */

	  if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
	    {
	      rtx tmp;
	      tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
	      if (! tmp)
		tmp = simplify_gen_subreg (op_mode,
					   force_reg (GET_MODE (value),
						      value1),
					   GET_MODE (value), 0);
	      value1 = tmp;
	    }
	  else
	    value1 = gen_lowpart (op_mode, value1);
	}
      else if (CONST_INT_P (value))
	value1 = gen_int_mode (INTVAL (value), op_mode);
      else
	/* Parse phase is supposed to make VALUE's data type
	   match that of the component reference, which is a type
	   at least as wide as the field; so VALUE should have
	   a mode that corresponds to that type.  */
	gcc_assert (CONSTANT_P (value));
    }

  create_fixed_operand (&ops[0], xop0);
  create_integer_operand (&ops[1], bitsize);
  create_integer_operand (&ops[2], bitnum);
  create_input_operand (&ops[3], value1, op_mode);
  if (maybe_expand_insn (insv->icode, 4, ops))
    {
      if (copy_back)
	convert_move (op0, xop0, true);
      return true;
    }
  delete_insns_since (last);
  return false;
}
/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		   unsigned HOST_WIDE_INT bitnum,
		   unsigned HOST_WIDE_INT bitregion_start,
		   unsigned HOST_WIDE_INT bitregion_end,
		   enum machine_mode fieldmode,
		   rtx value, bool fallback_p)
{
  rtx op0 = str_rtx;
  rtx orig_value;

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
      int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
      int byte_offset = 0;

      /* Paradoxical subregs need special handling on big endian machines.  */
      if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
	{
	  int difference = inner_mode_size - outer_mode_size;

	  if (WORDS_BIG_ENDIAN)
	    byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    byte_offset += difference % UNITS_PER_WORD;
	}
      else
	byte_offset = SUBREG_BYTE (op0);

      bitnum += byte_offset * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return true;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      struct expand_operand ops[3];
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      enum insn_code icode = optab_handler (vec_set_optab, outermode);
      int pos = bitnum / GET_MODE_BITSIZE (innermode);

      create_fixed_operand (&ops[0], op0);
      create_input_operand (&ops[1], value, innermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
	return true;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.  */
  if (!MEM_P (op0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && ((bitsize == GET_MODE_BITSIZE (GET_MODE (op0)) && bitnum == 0)
	  || (bitsize % BITS_PER_WORD == 0 && bitnum % BITS_PER_WORD == 0)))
    {
      /* Use the subreg machinery either to narrow OP0 to the required
	 words or to cope with mode punning between equal-sized modes.
	 In the latter case, use subreg on the rhs side, not lhs.  */
      rtx sub;

      if (bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
	{
	  sub = simplify_gen_subreg (GET_MODE (op0), value, fieldmode, 0);
	  if (sub)
	    {
	      emit_move_insn (op0, sub);
	      return true;
	    }
	}
      else
	{
	  sub = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
				     bitnum / BITS_PER_UNIT);
	  if (sub)
	    {
	      emit_move_insn (sub, value);
	      return true;
	    }
	}
    }

  /* If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */
  if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode))
    {
      op0 = adjust_bitfield_address (op0, fieldmode, bitnum / BITS_PER_UNIT);
      emit_move_insn (op0, value);
      return true;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (MEM_P (op0))
	  op0 = adjust_bitfield_address_size (op0, imode, 0, MEM_SIZE (op0));
	else
	  {
	    gcc_assert (imode != BLKmode);
	    op0 = gen_lowpart (imode, op0);
	  }
      }
  }

  /* Storing an lsb-aligned field in a register
     can be done with a movstrict instruction.  */

  if (!MEM_P (op0)
      && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
      rtx arg0 = op0;
      unsigned HOST_WIDE_INT subreg_off;

      if (GET_CODE (arg0) == SUBREG)
	{
	  /* Else we've got some float mode source being extracted into
	     a different float mode destination -- this combination of
	     subregs results in Severe Tire Damage.  */
	  gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
		      || GET_MODE_CLASS (fieldmode) == MODE_INT
		      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
	  arg0 = SUBREG_REG (arg0);
	}

      subreg_off = bitnum / BITS_PER_UNIT;
      if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
	{
	  arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);

	  create_fixed_operand (&ops[0], arg0);
	  /* Shrink the source operand to FIELDMODE.  */
	  create_convert_operand_to (&ops[1], value, fieldmode, false);
	  if (maybe_expand_insn (icode, 2, ops))
	    return true;
	}
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force
	 will result in an abort.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
	fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  unsigned int wordnum = (backwards
				  ? GET_MODE_SIZE (fieldmode) / UNITS_PER_WORD
				    - i - 1
				  : i);
	  unsigned int bit_offset = (backwards
				     ? MAX ((int) bitsize - ((int) i + 1)
					    * BITS_PER_WORD,
					    0)
				     : (int) i * BITS_PER_WORD);
	  rtx value_word = operand_subword_force (value, wordnum, fieldmode);
	  unsigned HOST_WIDE_INT new_bitsize =
	    MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD);

	  /* If the remaining chunk doesn't have full wordsize we have
	     to make sure that for big endian machines the higher order
	     bits are used.  */
	  if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards)
	    value_word = simplify_expand_binop (word_mode, lshr_optab,
						value_word,
						GEN_INT (BITS_PER_WORD
							 - new_bitsize),
						NULL_RTX, true,
						OPTAB_LIB_WIDEN);

	  if (!store_bit_field_1 (op0, new_bitsize,
				  bitnum + bit_offset,
				  bitregion_start, bitregion_end,
				  word_mode,
				  value_word, fallback_p))
	    {
	      delete_insns_since (last);
	      return false;
	    }
	}
      return true;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */

  orig_value = value;
  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* If OP0 is a multi-word register, narrow it to the affected word.
     If the region spans two words, defer to store_split_bit_field.  */
  if (!MEM_P (op0) && GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
    {
      op0 = simplify_gen_subreg (word_mode, op0, GET_MODE (op0),
				 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
      bitnum %= BITS_PER_WORD;
      if (bitnum + bitsize > BITS_PER_WORD)
	{
	  if (!fallback_p)
	    return false;

	  store_split_bit_field (op0, bitsize, bitnum, bitregion_start,
				 bitregion_end, value);
	  return true;
	}
    }

  /* From here on we can assume that the field to be stored in fits
     within a word.  If the destination is a register, it too fits
     in a word.  */

  extraction_insn insv;
  if (!MEM_P (op0)
      && get_best_reg_extraction_insn (&insv, EP_insv,
				       GET_MODE_BITSIZE (GET_MODE (op0)),
				       fieldmode)
      && store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
    return true;

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (MEM_P (op0))
    {
      if (get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
					fieldmode)
	  && store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
	return true;

      rtx last = get_last_insn ();

      /* Try loading part of OP0 into a register, inserting the bitfield
	 into that, and then copying the result back to OP0.  */
      unsigned HOST_WIDE_INT bitpos;
      rtx xop0 = adjust_bit_field_mem_for_reg (EP_insv, op0, bitsize, bitnum,
					       bitregion_start, bitregion_end,
					       fieldmode, &bitpos);
      if (xop0)
	{
	  rtx tempreg = copy_to_reg (xop0);
	  if (store_bit_field_1 (tempreg, bitsize, bitpos,
				 bitregion_start, bitregion_end,
				 fieldmode, orig_value, false))
	    {
	      emit_move_insn (xop0, tempreg);
	      return true;
	    }
	  delete_insns_since (last);
	}
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, bitsize, bitnum, bitregion_start,
			 bitregion_end, value);
  return true;
}
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.

   BITREGION_START is bitpos of the first bitfield in this region.
   BITREGION_END is the bitpos of the ending bitfield in this region.
   These two fields are 0, if the C++ memory model does not apply,
   or we are not interested in keeping track of bitfield regions.

   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.  */

void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		 unsigned HOST_WIDE_INT bitnum,
		 unsigned HOST_WIDE_INT bitregion_start,
		 unsigned HOST_WIDE_INT bitregion_end,
		 enum machine_mode fieldmode,
		 rtx value)
{
  /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
  if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, fieldmode,
				  bitregion_start, bitregion_end))
    {
      /* Storing any naturally aligned field can be done with a simple
	 store.  For targets that support fast unaligned memory, any
	 naturally sized, unit aligned field can be done directly.  */
      if (simple_mem_bitfield_p (str_rtx, bitsize, bitnum, fieldmode))
	{
	  str_rtx = adjust_bitfield_address (str_rtx, fieldmode,
					     bitnum / BITS_PER_UNIT);
	  emit_move_insn (str_rtx, value);
	}
      else
	{
	  str_rtx = narrow_bit_field_mem (str_rtx, fieldmode, bitsize, bitnum,
					  &bitnum);
	  /* Explicitly override the C/C++ memory model; ignore the
	     bit range so that we can do the access in the mode mandated
	     by -fstrict-volatile-bitfields instead.  */
	  store_fixed_bit_field_1 (str_rtx, bitsize, bitnum, value);
	}

      return;
    }

  /* Under the C++0x memory model, we must not touch bits outside the
     bit region.  Adjust the address to start at the beginning of the
     bit region.  */
  if (MEM_P (str_rtx) && bitregion_start > 0)
    {
      enum machine_mode bestmode;
      HOST_WIDE_INT offset, size;

      gcc_assert ((bitregion_start % BITS_PER_UNIT) == 0);

      offset = bitregion_start / BITS_PER_UNIT;
      bitnum -= bitregion_start;
      size = (bitnum + bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
      bitregion_end -= bitregion_start;
      bitregion_start = 0;
      bestmode = get_best_mode (bitsize, bitnum,
				bitregion_start, bitregion_end,
				MEM_ALIGN (str_rtx), VOIDmode,
				MEM_VOLATILE_P (str_rtx));
      str_rtx = adjust_bitfield_address_size (str_rtx, bestmode, offset, size);
    }

  if (!store_bit_field_1 (str_rtx, bitsize, bitnum,
			  bitregion_start, bitregion_end,
			  fieldmode, value, true))
    gcc_unreachable ();
}
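
/* Example of intent (added; the offsets are hypothetical): for a C
   assignment such as s.f = v, where f is a 9-bit bitfield placed 16 bits
   into s, the expander would reach this entry point roughly as
   store_bit_field (mem, 9, 16, region_start, region_end, fieldmode,
   value).  */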
/* Use shifts and boolean operations to store VALUE into a bit field of
   width BITSIZE in OP0, starting at bit BITNUM.  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitnum,
		       unsigned HOST_WIDE_INT bitregion_start,
		       unsigned HOST_WIDE_INT bitregion_end,
		       rtx value)
{
  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (MEM_P (op0))
    {
      enum machine_mode mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
	mode = word_mode;
      mode = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
			    MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0, bitsize, bitnum, bitregion_start,
				 bitregion_end, value);
	  return;
	}

      op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
    }

  store_fixed_bit_field_1 (op0, bitsize, bitnum, value);
}
/* Helper function for store_fixed_bit_field, stores
   the bit field always using the MODE of OP0.  */

static void
store_fixed_bit_field_1 (rtx op0, unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitnum,
			 rtx value)
{
  enum machine_mode mode;
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  mode = GET_MODE (op0);
  gcc_assert (SCALAR_INT_MODE_P (mode));

  /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
     for invalid input, such as f5 from gcc.dg/pr48335-2.c.  */

  if (BYTES_BIG_ENDIAN)
    /* BITNUM is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;

  /* Now BITNUM is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITNUM bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (CONST_INT_P (value))
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, v, bitnum);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitnum + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	value = convert_to_mode (mode, value, 1);

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitnum > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      bitnum, NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, temp,
			   mask_rtx (mode, bitnum, bitsize, 1),
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    {
      op0 = copy_rtx (op0);
      emit_move_insn (op0, temp);
    }
}
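
/* Worked example (added): storing the constant 5 into a 3-bit field whose
   lsb sits 4 bits from the lsb of a 32-bit word W performs the same
   arithmetic as
       w = (w & ~(0x7u << 4)) | ((5u & 0x7u) << 4);
   i.e. an AND with mask_rtx (mode, 4, 3, 1) followed by an IOR of the
   shifted value.  */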
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitpos,
		       unsigned HOST_WIDE_INT bitregion_start,
		       unsigned HOST_WIDE_INT bitregion_end,
		       rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If OP0 is a memory with a mode, then UNIT must not be larger than
     OP0's mode as well.  Otherwise, store_fixed_bit_field will call us
     again, and we will mutually recurse forever.  */
  if (MEM_P (op0) && GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
    unit = MIN (unit, GET_MODE_BITSIZE (GET_MODE (op0)));

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && !CONST_INT_P (value))
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* When region of bytes we can touch is restricted, decrease
	 UNIT close to the end of the region as needed.  If op0 is a REG
	 or SUBREG of REG, don't do this, as there can't be data races
	 on a register and we can expand shorter code in some cases.  */
      if (bitregion_end
	  && unit > BITS_PER_UNIT
	  && bitpos + bitsdone - thispos + unit > bitregion_end + 1
	  && !REG_P (op0)
	  && (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0))))
	{
	  unit = unit / 2;
	  continue;
	}

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
	{
	  /* Fetch successively less significant portions.  */
	  if (CONST_INT_P (value))
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    {
	      int total_bits = GET_MODE_BITSIZE (GET_MODE (value));
	      /* The args are chosen so that the last part includes the
		 lsb.  Give extract_bit_field the value it needs (with
		 endianness compensation) to fetch the piece we want.  */
	      part = extract_fixed_bit_field (word_mode, value, thissize,
					      total_bits - bitsize + bitsdone,
					      NULL_RTX, 1);
	    }
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (CONST_INT_P (value))
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part = extract_fixed_bit_field (word_mode, value, thissize,
					    bitsdone, NULL_RTX, 1);
	}

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD)
			    + (offset * unit / BITS_PER_WORD);
	  enum machine_mode sub_mode = GET_MODE (SUBREG_REG (op0));
	  if (sub_mode != BLKmode && GET_MODE_SIZE (sub_mode) < UNITS_PER_WORD)
	    word = word_offset ? const0_rtx : op0;
	  else
	    word = operand_subword_force (SUBREG_REG (op0), word_offset,
					  GET_MODE (SUBREG_REG (op0)));
	  offset &= BITS_PER_WORD / unit - 1;
	}
      else if (REG_P (op0))
	{
	  enum machine_mode op0_mode = GET_MODE (op0);
	  if (op0_mode != BLKmode && GET_MODE_SIZE (op0_mode) < UNITS_PER_WORD)
	    word = offset ? const0_rtx : op0;
	  else
	    word = operand_subword_force (op0, offset * unit / BITS_PER_WORD,
					  GET_MODE (op0));
	  offset &= BITS_PER_WORD / unit - 1;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.  If WORD is const0_rtx,
	 it is just an out-of-bounds access.  Ignore it.  */
      if (word != const0_rtx)
	store_fixed_bit_field (word, thissize, offset * unit + thispos,
			       bitregion_start, bitregion_end, part);
      bitsdone += thissize;
    }
}
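
/* Worked example (added): storing a 10-bit field at BITPOS == 28 with
   UNIT == 32 takes two iterations: the first stores THISSIZE == 4 bits at
   THISPOS == 28 of word 0, the second stores the remaining 6 bits at
   THISPOS == 0 of word 1.  */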
/* A subroutine of extract_bit_field_1 that converts return value X
   to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
   to extract_bit_field.  */

static rtx
convert_extracted_bit_field (rtx x, enum machine_mode mode,
			     enum machine_mode tmode, bool unsignedp)
{
  if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
    return x;

  /* If the x mode is not a scalar integral, first convert to the
     integer mode of that size and then access it as a floating-point
     value via a SUBREG.  */
  if (!SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode smode;

      smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
      x = convert_to_mode (smode, x, unsignedp);
      x = force_reg (smode, x);
      return gen_lowpart (tmode, x);
    }

  return convert_to_mode (tmode, x, unsignedp);
}
/* Try to use an ext(z)v pattern to extract a field from OP0.
   Return the extracted value on success, otherwise return null.
   EXT_MODE is the mode of the extraction and the other arguments
   are as for extract_bit_field.  */

static rtx
extract_bit_field_using_extv (const extraction_insn *extv, rtx op0,
			      unsigned HOST_WIDE_INT bitsize,
			      unsigned HOST_WIDE_INT bitnum,
			      int unsignedp, rtx target,
			      enum machine_mode mode, enum machine_mode tmode)
{
  struct expand_operand ops[4];
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode ext_mode = extv->field_mode;
  unsigned unit = GET_MODE_BITSIZE (ext_mode);

  if (bitsize == 0 || unit < bitsize)
    return NULL_RTX;

  if (MEM_P (op0))
    /* Get a reference to the first byte of the field.  */
    op0 = narrow_bit_field_mem (op0, extv->struct_mode, bitsize, bitnum,
				&bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in EXT_MODE.  */
      if (BYTES_BIG_ENDIAN)
	bitnum += unit - GET_MODE_BITSIZE (GET_MODE (op0));

      /* If op0 is a register, we need it in EXT_MODE to make it
	 acceptable to the format of ext(z)v.  */
      if (GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
	return NULL_RTX;
      if (REG_P (op0) && GET_MODE (op0) != ext_mode)
	op0 = gen_lowpart_SUBREG (ext_mode, op0);
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are extracting from.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;

  if (target == 0)
    target = spec_target = gen_reg_rtx (tmode);

  if (GET_MODE (target) != ext_mode)
    {
      /* Don't use LHS paradoxical subreg if explicit truncation is needed
	 between the mode of the extraction (word_mode) and the target
	 mode.  Instead, create a temporary and use convert_move to set
	 the target.  */
      if (REG_P (target)
	  && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (target), ext_mode))
	{
	  target = gen_lowpart (ext_mode, target);
	  if (GET_MODE_PRECISION (ext_mode)
	      > GET_MODE_PRECISION (GET_MODE (spec_target)))
	    spec_target_subreg = target;
	}
      else
	target = gen_reg_rtx (ext_mode);
    }

  create_output_operand (&ops[0], target, ext_mode);
  create_fixed_operand (&ops[1], op0);
  create_integer_operand (&ops[2], bitsize);
  create_integer_operand (&ops[3], bitnum);
  if (maybe_expand_insn (extv->icode, 4, ops))
    {
      target = ops[0].value;
      if (target == spec_target)
	return target;
      if (target == spec_target_subreg)
	return spec_target;
      return convert_extracted_bit_field (target, mode, tmode, unsignedp);
    }
  return NULL_RTX;
}
/* A subroutine of extract_bit_field, with the same arguments.
   If FALLBACK_P is true, fall back to extract_fixed_bit_field
   if we can find no other means of implementing the operation.
   if FALLBACK_P is false, return NULL instead.  */

static rtx
extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		     unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
		     enum machine_mode mode, enum machine_mode tmode,
		     bool fallback_p)
{
  rtx op0 = str_rtx;
  enum machine_mode int_mode;
  enum machine_mode mode1;

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* See if we can get a better vector mode before extracting.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && GET_MODE_INNER (GET_MODE (op0)) != tmode)
    {
      enum machine_mode new_mode;

      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
	new_mode = MIN_MODE_VECTOR_FLOAT;
      else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
	new_mode = MIN_MODE_VECTOR_FRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
	new_mode = MIN_MODE_VECTOR_UFRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
	new_mode = MIN_MODE_VECTOR_ACCUM;
      else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
	new_mode = MIN_MODE_VECTOR_UACCUM;
      else
	new_mode = MIN_MODE_VECTOR_INT;

      for (; new_mode != VOIDmode; new_mode = GET_MODE_WIDER_MODE (new_mode))
	if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
	    && targetm.vector_mode_supported_p (new_mode))
	  break;
      if (new_mode != VOIDmode)
	op0 = gen_lowpart (new_mode, op0);
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
	  == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      struct expand_operand ops[3];
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      enum insn_code icode = optab_handler (vec_extract_optab, outermode);
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);

      create_output_operand (&ops[0], target, innermode);
      create_input_operand (&ops[1], op0, outermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  target = ops[0].value;
	  if (GET_MODE (target) != mode)
	    return gen_lowpart (tmode, target);
	  return target;
	}
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (MEM_P (op0))
	  op0 = adjust_bitfield_address_size (op0, imode, 0, MEM_SIZE (op0));
	else if (imode != BLKmode)
	  {
	    op0 = gen_lowpart (imode, op0);

	    /* If we got a SUBREG, force it into a register since we
	       aren't going to be able to do another SUBREG on it.  */
	    if (GET_CODE (op0) == SUBREG)
	      op0 = force_reg (imode, op0);
	  }
	else if (REG_P (op0))
	  {
	    rtx reg, subreg;
	    imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
					    MODE_INT);
	    reg = gen_reg_rtx (imode);
	    subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
	    emit_move_insn (subreg, op0);
	    op0 = reg;
	    bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
	  }
	else
	  {
	    HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0));
	    rtx mem = assign_stack_temp (GET_MODE (op0), size);
	    emit_move_insn (mem, op0);
	    op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
	  }
      }
  }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Get the mode of the field to use for atomic access or subreg
     conversion.  */
  mode1 = mode;
  if (SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode try_mode = mode_for_size (bitsize,
						  GET_MODE_CLASS (tmode), 0);
      if (try_mode != BLKmode)
	mode1 = try_mode;
    }
  gcc_assert (mode1 != BLKmode);

  /* Extraction of a full MODE1 value can be done with a subreg as long
     as the least significant bit of the value is the least significant
     bit of either OP0 or a word of OP0.  */
  if (!MEM_P (op0)
      && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (mode1)
      && TRULY_NOOP_TRUNCATION_MODES_P (mode1, GET_MODE (op0)))
    {
      rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
				     bitnum / BITS_PER_UNIT);
      if (sub)
	return convert_extracted_bit_field (sub, mode, tmode, unsignedp);
    }

  /* Extraction of a full MODE1 value can be done with a load as long as
     the field is on a byte boundary and is sufficiently aligned.  */
  if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1))
    {
      op0 = adjust_bitfield_address (op0, mode1, bitnum / BITS_PER_UNIT);
      return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      unsigned int backwards = WORDS_BIG_ENDIAN;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_clobber (target);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  unsigned int wordnum
	    = (backwards
	       ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
	       : i);
	  /* Offset from start of field in OP0.  */
	  unsigned int bit_offset = (backwards
				     ? MAX ((int) bitsize - ((int) i + 1)
					    * BITS_PER_WORD,
					    0)
				     : (int) i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field_1 (op0, MIN (BITS_PER_WORD,
					     bitsize - i * BITS_PER_WORD),
				   bitnum + bit_offset, 1, target_part,
				   mode, word_mode, fallback_p);

	  gcc_assert (target_part);
	  if (!result_part)
	    {
	      delete_insns_since (last);
	      return NULL;
	    }

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      unsigned int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		emit_move_insn
		  (operand_subword (target,
				    backwards ? total_words - i - 1 : i,
				    1, VOIDmode),
		   const0_rtx);
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
    }

  /* If OP0 is a multi-word register, narrow it to the affected word.
     If the region spans two words, defer to extract_split_bit_field.  */
  if (!MEM_P (op0) && GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
    {
      op0 = simplify_gen_subreg (word_mode, op0, GET_MODE (op0),
				 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
      bitnum %= BITS_PER_WORD;
      if (bitnum + bitsize > BITS_PER_WORD)
	{
	  if (!fallback_p)
	    return NULL_RTX;
	  target = extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
	  return convert_extracted_bit_field (target, mode, tmode, unsignedp);
	}
    }

  /* From here on we know the desired field is smaller than a word.
     If OP0 is a register, it too fits within a word.  */
  enum extraction_pattern pattern = unsignedp ? EP_extzv : EP_extv;
  extraction_insn extv;
  if (!MEM_P (op0)
      /* ??? We could limit the structure size to the part of OP0 that
	 contains the field, with appropriate checks for endianness
	 and TRULY_NOOP_TRUNCATION.  */
      && get_best_reg_extraction_insn (&extv, pattern,
				       GET_MODE_BITSIZE (GET_MODE (op0)),
				       tmode))
    {
      rtx result = extract_bit_field_using_extv (&extv, op0, bitsize, bitnum,
						 unsignedp, target, mode,
						 tmode);
      if (result)
	return result;
    }

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (MEM_P (op0))
    {
      if (get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
					tmode))
	{
	  rtx result = extract_bit_field_using_extv (&extv, op0, bitsize,
						     bitnum, unsignedp,
						     target, mode, tmode);
	  if (result)
	    return result;
	}

      rtx last = get_last_insn ();

      /* Try loading part of OP0 into a register and extracting the
	 bitfield from that.  */
      unsigned HOST_WIDE_INT bitpos;
      rtx xop0 = adjust_bit_field_mem_for_reg (pattern, op0, bitsize, bitnum,
					       0, 0, tmode, &bitpos);
      if (xop0)
	{
	  xop0 = copy_to_reg (xop0);
	  rtx result = extract_bit_field_1 (xop0, bitsize, bitpos,
					    unsignedp, target,
					    mode, tmode, false);
	  if (result)
	    return result;
	  delete_insns_since (last);
	}
    }

  if (!fallback_p)
    return NULL;

  /* Find a correspondingly-sized integer field, so we can apply
     shifts and masks to it.  */
  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  /* Should probably push op0 out to memory and then do a load.  */
  gcc_assert (int_mode != BLKmode);

  target = extract_fixed_bit_field (int_mode, op0, bitsize, bitnum,
				    target, unsignedp);
  return convert_extracted_bit_field (target, mode, tmode, unsignedp);
}
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
		   enum machine_mode mode, enum machine_mode tmode)
{
  enum machine_mode mode1;

  /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
  if (GET_MODE_BITSIZE (GET_MODE (str_rtx)) > 0)
    mode1 = GET_MODE (str_rtx);
  else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
    mode1 = GET_MODE (target);
  else
    mode1 = tmode;

  if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, mode1, 0, 0))
    {
      rtx result;

      /* Extraction of a full MODE1 value can be done with a load as long as
	 the field is on a byte boundary and is sufficiently aligned.  */
      if (simple_mem_bitfield_p (str_rtx, bitsize, bitnum, mode1))
	result = adjust_bitfield_address (str_rtx, mode1,
					  bitnum / BITS_PER_UNIT);
      else
	{
	  str_rtx = narrow_bit_field_mem (str_rtx, mode1, bitsize, bitnum,
					  &bitnum);
	  result = extract_fixed_bit_field_1 (mode, str_rtx, bitsize, bitnum,
					      target, unsignedp);
	}

      return convert_extracted_bit_field (result, mode, tmode, unsignedp);
    }

  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
			      target, mode, tmode, true);
}
/* Use shifts and boolean operations to extract a field of BITSIZE bits
   from bit BITNUM of OP0.

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
			 unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitnum, rtx target,
			 int unsignedp)
{
  if (MEM_P (op0))
    {
      enum machine_mode mode
	= get_best_mode (bitsize, bitnum, 0, 0, MEM_ALIGN (op0), word_mode,
			 MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize, bitnum, unsignedp);

      op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
    }

  return extract_fixed_bit_field_1 (tmode, op0, bitsize, bitnum,
				    target, unsignedp);
}
/* Helper function for extract_fixed_bit_field, extracts
   the bit field always using the MODE of OP0.  */

static rtx
extract_fixed_bit_field_1 (enum machine_mode tmode, rtx op0,
			   unsigned HOST_WIDE_INT bitsize,
			   unsigned HOST_WIDE_INT bitnum, rtx target,
			   int unsignedp)
{
  enum machine_mode mode = GET_MODE (op0);
  gcc_assert (SCALAR_INT_MODE_P (mode));

  /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
     for invalid input, such as extract equivalent of f5 from
     gcc.dg/pr48335-2.c.  */

  if (BYTES_BIG_ENDIAN)
    /* BITNUM is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;

  /* Now BITNUM is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitnum)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  /* Maybe propagate the target for the shift.  */
	  rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitnum, subtarget, 1);
	}

      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitnum + bitsize)
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitnum)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (mode != tmode)
    target = 0;

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitnum))
    {
      int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitnum);
      /* Maybe propagate the target for the shift.  */
      rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       GET_MODE_BITSIZE (mode) - bitsize, target, 0);
}
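
/* As an illustration (not exercised by the compiler itself): for a 32-bit
   OP0 with BITSIZE == 5 and BITNUM == 3 in little-endian bit numbering,
   the unsigned path above emits the equivalent of

       (x >> 3) & 0x1f

   while the signed path first narrows to the 8-bit mode that contains the
   field and then emits the equivalent of an arithmetic shift,

       (signed char) x >> 3

   which drags copies of the field's msb in from the left.  */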
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE << BITPOS.  */

static rtx
lshift_value (enum machine_mode mode, unsigned HOST_WIDE_INT value,
	      int bitpos)
{
  return immed_wide_int_const (wi::lshift (value, bitpos), mode);
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.  */

static rtx
extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitpos, int unsignedp)
{
  unsigned int unit;
  unsigned int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 extract_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (REG_P (op0))
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* Extract the parts in bit-counting order,
	 whose meaning is determined by BYTES_PER_UNIT.
	 OFFSET is in UNITs, and UNIT is in bits.  */
      part = extract_fixed_bit_field (word_mode, word, thissize,
				      offset * unit + thispos, 0, 1);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
	{
	  if (bitsize != bitsdone)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 bitsize - bitsdone, 0, 1);
	}
      else
	{
	  if (bitsdone != thissize)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 bitsdone - thissize, 0, 1);
	}

      if (first)
	result = part;
      else
	/* Combine the parts with bitwise or.  This works
	   because we extracted each part as an unsigned bit field.  */
	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
			       OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
			 BITS_PER_WORD - bitsize, NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
		       BITS_PER_WORD - bitsize, NULL_RTX, 0);
}
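
/* For example, extracting a 16-bit field that starts at bit 24 of a
   32-bit-word operand takes two iterations of the loop above: the first
   pulls the 8 bits at positions 24..31 of word 0, the second the 8 bits
   at positions 0..7 of word 1; on a little-endian target the second part
   is shifted left by 8 and IORed into the result before the optional
   sign extension.  */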
/* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
   the bit pattern.  SRC_MODE is the mode of SRC; if this is smaller than
   MODE, fill the upper bits with zeros.  Fail if the layout of either
   mode is unknown (as for CC modes) or if the extraction would involve
   unprofitable mode punning.  Return the value on success, otherwise
   return null.

   This is different from gen_lowpart* in these respects:

     - the returned value must always be considered an rvalue

     - when MODE is wider than SRC_MODE, the extraction involves
       a zero extension

     - when MODE is smaller than SRC_MODE, the extraction involves
       a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).

   In other words, this routine performs a computation, whereas the
   gen_lowpart* routines are conceptually lvalue or rvalue subreg
   operations.  */

rtx
extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
{
  enum machine_mode int_mode, src_int_mode;

  if (mode == src_mode)
    return src;

  if (CONSTANT_P (src))
    {
      /* simplify_gen_subreg can't be used here, as if simplify_subreg
	 fails, it will happily create (subreg (symbol_ref)) or similar
	 invalid SUBREGs.  */
      unsigned int byte = subreg_lowpart_offset (mode, src_mode);
      rtx ret = simplify_subreg (mode, src, src_mode, byte);
      if (ret)
	return ret;

      if (GET_MODE (src) == VOIDmode
	  || !validate_subreg (mode, src_mode, src, byte))
	return NULL_RTX;

      src = force_reg (GET_MODE (src), src);
      return gen_rtx_SUBREG (mode, src, byte);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
    return NULL_RTX;

  if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
      && MODES_TIEABLE_P (mode, src_mode))
    {
      rtx x = gen_lowpart_common (mode, src);
      if (x)
	return x;
    }

  src_int_mode = int_mode_for_mode (src_mode);
  int_mode = int_mode_for_mode (mode);
  if (src_int_mode == BLKmode || int_mode == BLKmode)
    return NULL_RTX;

  if (!MODES_TIEABLE_P (src_int_mode, src_mode))
    return NULL_RTX;
  if (!MODES_TIEABLE_P (int_mode, mode))
    return NULL_RTX;

  src = gen_lowpart (src_int_mode, src);
  src = convert_modes (int_mode, src_int_mode, src, true);
  src = gen_lowpart (mode, src);
  return src;
}
/* Add INC into TARGET.  */

void
expand_inc (rtx target, rtx inc)
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
			    target, inc,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (rtx target, rtx dec)
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
			    target, dec,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the rtx for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

static rtx
expand_shift_1 (enum tree_code code, enum machine_mode mode, rtx shifted,
		rtx amount, rtx target, int unsignedp)
{
  rtx op1, temp = 0;
  int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  optab lshift_optab = ashl_optab;
  optab rshift_arith_optab = ashr_optab;
  optab rshift_uns_optab = lshr_optab;
  optab lrotate_optab = rotl_optab;
  optab rrotate_optab = rotr_optab;
  enum machine_mode op1_mode;
  enum machine_mode scalar_mode = mode;
  int attempt;
  bool speed = optimize_insn_for_speed_p ();

  if (VECTOR_MODE_P (mode))
    scalar_mode = GET_MODE_INNER (mode);
  op1 = amount;
  op1_mode = GET_MODE (op1);

  /* Determine whether the shift/rotate amount is a vector, or scalar.  If the
     shift amount is a vector, use the vector/vector shift patterns.  */
  if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
    {
      lshift_optab = vashl_optab;
      rshift_arith_optab = vashr_optab;
      rshift_uns_optab = vlshr_optab;
      lrotate_optab = vrotl_optab;
      rrotate_optab = vrotr_optab;
    }

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  if (SHIFT_COUNT_TRUNCATED)
    {
      if (CONST_INT_P (op1)
	  && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
	      (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (scalar_mode)))
	op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
		       % GET_MODE_BITSIZE (scalar_mode));
      else if (GET_CODE (op1) == SUBREG
	       && subreg_lowpart_p (op1)
	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op1)))
	       && SCALAR_INT_MODE_P (GET_MODE (op1)))
	op1 = SUBREG_REG (op1);
    }

  /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
     prefer left rotation, if op1 is from bitsize / 2 + 1 to
     bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
     amount instead.  */
  if (rotate
      && CONST_INT_P (op1)
      && IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (scalar_mode) / 2 + left,
		   GET_MODE_BITSIZE (scalar_mode) - 1))
    {
      op1 = GEN_INT (GET_MODE_BITSIZE (scalar_mode) - INTVAL (op1));
      left = !left;
      code = left ? LROTATE_EXPR : RROTATE_EXPR;
    }

  if (op1 == const0_rtx)
    return shifted;

  /* Check whether it's cheaper to implement a left shift by a constant
     bit count by a sequence of additions.  */
  if (code == LSHIFT_EXPR
      && CONST_INT_P (op1)
      && INTVAL (op1) > 0
      && INTVAL (op1) < GET_MODE_PRECISION (scalar_mode)
      && INTVAL (op1) < MAX_BITS_PER_WORD
      && (shift_cost (speed, mode, INTVAL (op1))
	  > INTVAL (op1) * add_cost (speed, mode))
      && shift_cost (speed, mode, INTVAL (op1)) != MAX_COST)
    {
      int i;
      for (i = 0; i < INTVAL (op1); i++)
	{
	  temp = force_reg (mode, shifted);
	  shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
				  unsignedp, OPTAB_LIB_WIDEN);
	}
      return shifted;
    }

  for (attempt = 0; temp == 0 && attempt < 3; attempt++)
    {
      enum optab_methods methods;

      if (attempt == 0)
	methods = OPTAB_DIRECT;
      else if (attempt == 1)
	methods = OPTAB_WIDEN;
      else
	methods = OPTAB_LIB_WIDEN;

      if (rotate)
	{
	  /* Widening does not work for rotation.  */
	  if (methods == OPTAB_WIDEN)
	    continue;
	  else if (methods == OPTAB_LIB_WIDEN)
	    {
	      /* If we have been unable to open-code this by a rotation,
		 do it as the IOR of two shifts.  I.e., to rotate A
		 by N bits, compute
		 (A << N) | ((unsigned) A >> ((-N) & (C - 1)))
		 where C is the bitsize of A.

		 It is theoretically possible that the target machine might
		 not be able to perform either shift and hence we would
		 be making two libcalls rather than just the one for the
		 shift (similarly if IOR could not be done).  We will allow
		 this extremely unlikely lossage to avoid complicating the
		 code.  */

	      rtx subtarget = target == shifted ? 0 : target;
	      rtx new_amount, other_amount;
	      rtx temp1;

	      new_amount = op1;
	      if (op1 == const0_rtx)
		return shifted;
	      else if (CONST_INT_P (op1))
		other_amount = GEN_INT (GET_MODE_BITSIZE (scalar_mode)
					- INTVAL (op1));
	      else
		{
		  other_amount
		    = simplify_gen_unary (NEG, GET_MODE (op1),
					  op1, GET_MODE (op1));
		  HOST_WIDE_INT mask = GET_MODE_PRECISION (scalar_mode) - 1;
		  other_amount
		    = simplify_gen_binary (AND, GET_MODE (op1), other_amount,
					   gen_int_mode (mask, GET_MODE (op1)));
		}

	      shifted = force_reg (mode, shifted);

	      temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
				     mode, shifted, new_amount, 0, 1);
	      temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
				      mode, shifted, other_amount,
				      subtarget, 1);
	      return expand_binop (mode, ior_optab, temp, temp1, target,
				   unsignedp, methods);
	    }

	  temp = expand_binop (mode,
			       left ? lrotate_optab : rrotate_optab,
			       shifted, op1, target, unsignedp, methods);
	}
      else if (unsignedp)
	temp = expand_binop (mode,
			     left ? lshift_optab : rshift_uns_optab,
			     shifted, op1, target, unsignedp, methods);

      /* Do arithmetic shifts.
	 Also, if we are going to widen the operand, we can just as well
	 use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
	  && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
	{
	  enum optab_methods methods1 = methods;

	  /* If trying to widen a log shift to an arithmetic shift,
	     don't accept an arithmetic shift of the same size.  */
	  if (unsignedp)
	    methods1 = OPTAB_MUST_WIDEN;

	  /* Arithmetic shift */

	  temp = expand_binop (mode,
			       left ? lshift_optab : rshift_arith_optab,
			       shifted, op1, target, unsignedp, methods1);
	}

      /* We used to try extzv here for logical right shifts, but that was
	 only useful for one machine, the VAX, and caused poor code
	 generation there for lshrdi3, so the code was deleted and a
	 define_expand for lshrsi3 was added to vax.md.  */
    }

  gcc_assert (temp);
  return temp;
}
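
/* For a 32-bit value the IOR-of-two-shifts fallback above therefore
   computes the equivalent of

       (x << n) | ((unsigned) x >> ((-n) & 31))

   The "& 31" keeps the right-shift amount in range when n == 0, so the
   expansion degenerates to x | x instead of an undefined shift by 32.  */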
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
	      int amount, rtx target, int unsignedp)
{
  return expand_shift_1 (code, mode,
			 shifted, GEN_INT (amount), target, unsignedp);
}

/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_variable_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
		       tree amount, rtx target, int unsignedp)
{
  return expand_shift_1 (code, mode,
			 shifted, expand_normal (amount), target, unsignedp);
}
/* Indicates the type of fixup needed after a constant multiplication.
   BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
   the result should be negated, and ADD_VARIANT means that the
   multiplicand should be added to the result.  */
enum mult_variant {basic_variant, negate_variant, add_variant};

static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
			const struct mult_cost *, enum machine_mode mode);
static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
				 struct algorithm *, enum mult_variant *, int);
static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
			      const struct algorithm *, enum mult_variant);
static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
static rtx extract_high_half (enum machine_mode, rtx);
static rtx expmed_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
static rtx expmed_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
				       int, int);
/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.
   MODE is the machine mode of the multiplication.  */

static void
synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
	    const struct mult_cost *cost_limit, enum machine_mode mode)
{
  int m;
  struct algorithm *alg_in, *best_alg;
  struct mult_cost best_cost;
  struct mult_cost new_limit;
  int op_cost, op_latency;
  unsigned HOST_WIDE_INT orig_t = t;
  unsigned HOST_WIDE_INT q;
  int maxm, hash_index;
  bool cache_hit = false;
  enum alg_code cache_alg = alg_zero;
  bool speed = optimize_insn_for_speed_p ();
  enum machine_mode imode;
  struct alg_hash_entry *entry_ptr;

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost.cost = cost_limit->cost + 1;
  alg_out->cost.latency = cost_limit->latency + 1;

  if (cost_limit->cost < 0
      || (cost_limit->cost == 0 && cost_limit->latency <= 0))
    return;

  /* Be prepared for vector modes.  */
  imode = GET_MODE_INNER (mode);
  if (imode == VOIDmode)
    imode = mode;

  maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (imode));

  /* Restrict the bits of "t" to the multiplication's mode.  */
  t &= GET_MODE_MASK (imode);

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost.cost = 0;
      alg_out->cost.latency = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (MULT_COST_LESS (cost_limit, zero_cost (speed)))
	return;
      else
	{
	  alg_out->ops = 1;
	  alg_out->cost.cost = zero_cost (speed);
	  alg_out->cost.latency = zero_cost (speed);
	  alg_out->op[0] = alg_zero;
	  return;
	}
    }

  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = XALLOCA (struct algorithm);
  best_alg = XALLOCA (struct algorithm);
  best_cost = *cost_limit;

  /* Compute the hash index.  */
  hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;

  /* See if we already know what to do for T.  */
  entry_ptr = alg_hash_entry_ptr (hash_index);
  if (entry_ptr->t == t
      && entry_ptr->mode == mode
      && entry_ptr->speed == speed
      && entry_ptr->alg != alg_unknown)
    {
      cache_alg = entry_ptr->alg;

      if (cache_alg == alg_impossible)
	{
	  /* The cache tells us that it's impossible to synthesize
	     multiplication by T within entry_ptr->cost.  */
	  if (!CHEAPER_MULT_COST (&entry_ptr->cost, cost_limit))
	    /* COST_LIMIT is at least as restrictive as the one
	       recorded in the hash table, in which case we have no
	       hope of synthesizing a multiplication.  Just
	       return.  */
	    return;

	  /* If we get here, COST_LIMIT is less restrictive than the
	     one recorded in the hash table, so we may be able to
	     synthesize a multiplication.  Proceed as if we didn't
	     have the cache entry.  */
	}
      else
	{
	  if (CHEAPER_MULT_COST (cost_limit, &entry_ptr->cost))
	    /* The cached algorithm shows that this multiplication
	       requires more cost than COST_LIMIT.  Just return.  This
	       way, we don't clobber this cache entry with
	       alg_impossible but retain useful information.  */
	    return;

	  cache_hit = true;

	  switch (cache_alg)
	    {
	    case alg_shift:
	      goto do_alg_shift;

	    case alg_add_t_m2:
	    case alg_sub_t_m2:
	      goto do_alg_addsub_t_m2;

	    case alg_add_factor:
	    case alg_sub_factor:
	      goto do_alg_addsub_factor;

	    case alg_add_t2_m:
	      goto do_alg_add_t2_m;

	    case alg_sub_t2_m:
	      goto do_alg_sub_t2_m;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */

  if ((t & 1) == 0)
    {
    do_alg_shift:
      m = floor_log2 (t & -t);	/* m = number of low zero bits */
      if (m < maxm)
	{
	  q = t >> m;
	  /* The function expand_shift will choose between a shift and
	     a sequence of additions, so the observed cost is given as
	     MIN (m * add_cost(speed, mode), shift_cost(speed, mode, m)).  */
	  op_cost = m * add_cost (speed, mode);
	  if (shift_cost (speed, mode, m) < op_cost)
	    op_cost = shift_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, q, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_shift;
	    }

	  /* See if treating ORIG_T as a signed number yields a better
	     sequence.  Try this sequence only for a negative ORIG_T
	     as it would be useless for a non-negative ORIG_T.  */
	  if ((HOST_WIDE_INT) orig_t < 0)
	    {
	      /* Shift ORIG_T as follows because a right shift of a
		 negative-valued signed type is implementation
		 defined.  */
	      q = ~(~orig_t >> m);
	      /* The function expand_shift will choose between a shift
		 and a sequence of additions, so the observed cost is
		 given as MIN (m * add_cost(speed, mode),
		 shift_cost(speed, mode, m)).  */
	      op_cost = m * add_cost (speed, mode);
	      if (shift_cost (speed, mode, m) < op_cost)
		op_cost = shift_cost (speed, mode, m);
	      new_limit.cost = best_cost.cost - op_cost;
	      new_limit.latency = best_cost.latency - op_cost;
	      synth_mult (alg_in, q, &new_limit, mode);

	      alg_in->cost.cost += op_cost;
	      alg_in->cost.latency += op_cost;
	      if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
		{
		  struct algorithm *x;
		  best_cost = alg_in->cost;
		  x = alg_in, alg_in = best_alg, best_alg = x;
		  best_alg->log[best_alg->ops] = m;
		  best_alg->op[best_alg->ops] = alg_shift;
		}
	    }
	}
      if (cache_hit)
	goto done;
    }

  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

    do_alg_addsub_t_m2:
      for (w = 1; (w & t) != 0; w <<= 1)
	;
      /* If T was -1, then W will be zero after the loop.  This is another
	 case where T ends with ...111.  Handling this with (T + 1) and
	 subtract 1 produces slightly better code and results in algorithm
	 selection much faster than treating it like the ...0111 case
	 below.  */
      if (w == 0
	  || (w > 2
	      /* Reject the case where t is 3.
		 Thus we prefer addition in that case.  */
	      && t != 3))
	{
	  /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */

	  op_cost = add_cost (speed, mode);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t + 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	    }
	}
      else
	{
	  /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */

	  op_cost = add_cost (speed, mode);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t - 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_add_t_m2;
	    }
	}

      /* We may be able to calculate a * -7, a * -15, a * -31, etc
	 quickly with a - a * n for some appropriate constant n.  */
      m = exact_log2 (-orig_t + 1);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftsub1_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
		      &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	    }
	}

      if (cache_hit)
	goto done;
    }

  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shift the result by m and add/subtract it to itself.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */

 do_alg_addsub_factor:
  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_add_factor))
	{
	  /* If the target has a cheap shift-and-add instruction use
	     that in preference to a shift insn followed by an add insn.
	     Assume that the shift-and-add is "atomic" with a latency
	     equal to its cost, otherwise assume that on superscalar
	     hardware the shift may be executed concurrently with the
	     earlier steps in the algorithm.  */
	  op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
	  if (shiftadd_cost (speed, mode, m) < op_cost)
	    {
	      op_cost = shiftadd_cost (speed, mode, m);
	      op_latency = op_cost;
	    }
	  else
	    op_latency = add_cost (speed, mode);

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_factor;
	    }
	  /* Other factors will have been taken care of in the recursion.  */
	  break;
	}

      d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_sub_factor))
	{
	  /* If the target has a cheap shift-and-subtract insn use
	     that in preference to a shift insn followed by a sub insn.
	     Assume that the shift-and-sub is "atomic" with a latency
	     equal to its cost, otherwise assume that on superscalar
	     hardware the shift may be executed concurrently with the
	     earlier steps in the algorithm.  */
	  op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
	  if (shiftsub0_cost (speed, mode, m) < op_cost)
	    {
	      op_cost = shiftsub0_cost (speed, mode, m);
	      op_latency = op_cost;
	    }
	  else
	    op_latency = add_cost (speed, mode);

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_factor;
	    }
	  break;
	}
    }
  if (cache_hit)
    goto done;

  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
    do_alg_add_t2_m:
      q = t - 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftadd_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_t2_m;
	    }
	}
      if (cache_hit)
	goto done;

    do_alg_sub_t2_m:
      q = t + 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftsub0_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t2_m;
	    }
	}
      if (cache_hit)
	goto done;
    }

 done:
  /* If best_cost has not decreased, we have not found any algorithm.  */
  if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
    {
      /* We failed to find an algorithm.  Record alg_impossible for
	 this case (that is, <T, MODE, COST_LIMIT>) so that next time
	 we are asked to find an algorithm for T within the same or
	 lower COST_LIMIT, we can immediately return to the
	 caller.  */
      entry_ptr->t = t;
      entry_ptr->mode = mode;
      entry_ptr->speed = speed;
      entry_ptr->alg = alg_impossible;
      entry_ptr->cost = *cost_limit;
      return;
    }

  /* Cache the result.  */
  if (!cache_hit)
    {
      entry_ptr->t = t;
      entry_ptr->mode = mode;
      entry_ptr->speed = speed;
      entry_ptr->alg = best_alg->op[best_alg->ops];
      entry_ptr->cost.cost = best_cost.cost;
      entry_ptr->cost.latency = best_cost.latency;
    }

  /* If we are getting a too long sequence for `struct algorithm'
     to record, make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = best_cost;
  memcpy (alg_out->op, best_alg->op,
	  alg_out->ops * sizeof *alg_out->op);
  memcpy (alg_out->log, best_alg->log,
	  alg_out->ops * sizeof *alg_out->log);
}
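
/* As an illustration, for t == 10 == 0b1010 the search above can combine
   alg_shift with alg_add_t2_m: 10 == 5 << 1 and 5 == (1 << 2) + 1, giving

       x * 10  ==>  ((x << 2) + x) << 1

   i.e. two shift/add steps.  Which decomposition actually wins depends on
   the target's add_cost, shift_cost and shiftadd_cost entries.  */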
/* Find the cheapest way of multiplying a value of mode MODE by VAL.
   Try three variations:

       - a shift/add sequence based on VAL itself
       - a shift/add sequence based on -VAL, followed by a negation
       - a shift/add sequence based on VAL - 1, followed by an addition.

   Return true if the cheapest of these cost less than MULT_COST,
   describing the algorithm in *ALG and final fixup in *VARIANT.  */

static bool
choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
		     struct algorithm *alg, enum mult_variant *variant,
		     int mult_cost)
{
  struct algorithm alg2;
  struct mult_cost limit;
  int op_cost;
  bool speed = optimize_insn_for_speed_p ();

  /* Fail quickly for impossible bounds.  */
  if (mult_cost < 0)
    return false;

  /* Ensure that mult_cost provides a reasonable upper bound.
     Any constant multiplication can be performed with less
     than 2 * bits additions.  */
  op_cost = 2 * GET_MODE_UNIT_BITSIZE (mode) * add_cost (speed, mode);
  if (mult_cost > op_cost)
    mult_cost = op_cost;

  *variant = basic_variant;
  limit.cost = mult_cost;
  limit.latency = mult_cost;
  synth_mult (alg, val, &limit, mode);

  /* This works only if the inverted value actually fits in an
     `unsigned int' */
  if (HOST_BITS_PER_INT >= GET_MODE_UNIT_BITSIZE (mode))
    {
      op_cost = neg_cost (speed, mode);
      if (MULT_COST_LESS (&alg->cost, mult_cost))
	{
	  limit.cost = alg->cost.cost - op_cost;
	  limit.latency = alg->cost.latency - op_cost;
	}
      else
	{
	  limit.cost = mult_cost - op_cost;
	  limit.latency = mult_cost - op_cost;
	}

      synth_mult (&alg2, -val, &limit, mode);
      alg2.cost.cost += op_cost;
      alg2.cost.latency += op_cost;
      if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
	*alg = alg2, *variant = negate_variant;
    }

  /* This proves very useful for division-by-constant.  */
  op_cost = add_cost (speed, mode);
  if (MULT_COST_LESS (&alg->cost, mult_cost))
    {
      limit.cost = alg->cost.cost - op_cost;
      limit.latency = alg->cost.latency - op_cost;
    }
  else
    {
      limit.cost = mult_cost - op_cost;
      limit.latency = mult_cost - op_cost;
    }

  synth_mult (&alg2, val - 1, &limit, mode);
  alg2.cost.cost += op_cost;
  alg2.cost.latency += op_cost;
  if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
    *alg = alg2, *variant = add_variant;

  return MULT_COST_LESS (&alg->cost, mult_cost);
}
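
/* For example, with val == -3 the negate_variant path may win: synthesize
   x * 3 as (x << 1) + x and negate the result, rather than synthesizing
   the long bit pattern of -3 directly.  The add_variant (val - 1 followed
   by one addition of the multiplicand) pays off mostly for the large odd
   multipliers that division-by-constant generates.  */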
/* A subroutine of expand_mult, used for constant multiplications.
   Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
   convenient.  Use the shift/add sequence described by ALG and apply
   the final fixup specified by VARIANT.  */

static rtx
expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
		   rtx target, const struct algorithm *alg,
		   enum mult_variant variant)
{
  HOST_WIDE_INT val_so_far;
  rtx insn, accum, tem;
  int opno;
  enum machine_mode nmode;

  /* Avoid referencing memory over and over and invalid sharing
     on SUBREGs.  */
  op0 = force_reg (mode, op0);

  /* ACCUM starts out either as OP0 or as a zero, depending on
     the first operation.  */

  if (alg->op[0] == alg_zero)
    {
      accum = copy_to_mode_reg (mode, CONST0_RTX (mode));
      val_so_far = 0;
    }
  else if (alg->op[0] == alg_m)
    {
      accum = copy_to_mode_reg (mode, op0);
      val_so_far = 1;
    }
  else
    gcc_unreachable ();

  for (opno = 1; opno < alg->ops; opno++)
    {
      int log = alg->log[opno];
      rtx shift_subtarget = optimize ? 0 : accum;
      rtx add_target
	= (opno == alg->ops - 1 && target != 0 && variant != add_variant
	   && !optimize)
	  ? target : 0;
      rtx accum_target = optimize ? 0 : accum;
      rtx accum_inner;

      switch (alg->op[opno])
	{
	case alg_shift:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
	  /* REG_EQUAL note will be attached to the following insn.  */
	  emit_move_insn (accum, tem);
	  val_so_far <<= log;
	  break;

	case alg_add_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_sub_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far -= (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_add_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				log, shift_subtarget, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) + 1;
	  break;

	case alg_sub_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				log, shift_subtarget, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) - 1;
	  break;

	case alg_add_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += val_so_far << log;
	  break;

	case alg_sub_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
				 (add_target
				  ? add_target : (optimize ? 0 : tem)));
	  val_so_far = (val_so_far << log) - val_so_far;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (SCALAR_INT_MODE_P (mode))
	{
	  /* Write a REG_EQUAL note on the last insn so that we can cse
	     multiplication sequences.  Note that if ACCUM is a SUBREG,
	     we've set the inner register and must properly indicate that.  */
	  tem = op0, nmode = mode;
	  accum_inner = accum;
	  if (GET_CODE (accum) == SUBREG)
	    {
	      accum_inner = SUBREG_REG (accum);
	      nmode = GET_MODE (accum_inner);
	      tem = gen_lowpart (nmode, op0);
	    }

	  insn = get_last_insn ();
	  set_dst_reg_note (insn, REG_EQUAL,
			    gen_rtx_MULT (nmode, tem,
					  gen_int_mode (val_so_far, nmode)),
			    accum_inner);
	}
    }

  if (variant == negate_variant)
    {
      val_so_far = -val_so_far;
      accum = expand_unop (mode, neg_optab, accum, target, 0);
    }
  else if (variant == add_variant)
    {
      val_so_far = val_so_far + 1;
      accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
    }

  /* Compare only the bits of val and val_so_far that are significant
     in the result mode, to avoid sign-/zero-extension confusion.  */
  nmode = GET_MODE_INNER (mode);
  if (nmode == VOIDmode)
    nmode = mode;
  val &= GET_MODE_MASK (nmode);
  val_so_far &= GET_MODE_MASK (nmode);
  gcc_assert (val == val_so_far);

  return accum;
}
/* Perform a multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).

   We check specially for a constant integer as OP1.
   If you want this check for OP0 as well, then before calling
   you should swap the two operands if OP0 would be constant.  */

rtx
expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
	     int unsignedp)
{
  enum mult_variant variant;
  struct algorithm algorithm;
  rtx scalar_op1;
  int max_cost;
  bool speed = optimize_insn_for_speed_p ();
  bool do_trapv = flag_trapv && SCALAR_INT_MODE_P (mode) && !unsignedp;

  if (CONSTANT_P (op0))
    {
      rtx temp = op0;
      op0 = op1;
      op1 = temp;
    }

  /* For vectors, there are several simplifications that can be made if
     all elements of the vector constant are identical.  */
  scalar_op1 = op1;
  if (GET_CODE (op1) == CONST_VECTOR)
    {
      int i, n = CONST_VECTOR_NUNITS (op1);
      scalar_op1 = CONST_VECTOR_ELT (op1, 0);
      for (i = 1; i < n; ++i)
	if (!rtx_equal_p (scalar_op1, CONST_VECTOR_ELT (op1, i)))
	  goto skip_scalar;
    }

  if (INTEGRAL_MODE_P (mode))
    {
      rtx fake_reg;
      HOST_WIDE_INT coeff;
      bool is_neg;
      int mode_bitsize;

      if (op1 == CONST0_RTX (mode))
	return op1;
      if (op1 == CONST1_RTX (mode))
	return op0;
      if (op1 == CONSTM1_RTX (mode))
	return expand_unop (mode, do_trapv ? negv_optab : neg_optab,
			    op0, target, 0);

      if (do_trapv)
	goto skip_synth;

      /* If mode is integer vector mode, check if the backend supports
	 vector lshift (by scalar or vector) at all.  If not, we can't use
	 synthesized multiply.  */
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
	  && optab_handler (vashl_optab, mode) == CODE_FOR_nothing
	  && optab_handler (ashl_optab, mode) == CODE_FOR_nothing)
	goto skip_synth;

      /* These are the operations that are potentially turned into
	 a sequence of shifts and additions.  */
      mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);

      /* synth_mult does an `unsigned int' multiply.  As long as the mode is
	 less than or equal in size to `unsigned int' this doesn't matter.
	 If the mode is larger than `unsigned int', then synth_mult works
	 only if the constant value exactly fits in an `unsigned int' without
	 any truncation.  This means that multiplying by negative values does
	 not work; results are off by 2^32 on a 32 bit machine.  */
      if (CONST_INT_P (scalar_op1))
	{
	  coeff = INTVAL (scalar_op1);
	  is_neg = coeff < 0;
	}
#if TARGET_SUPPORTS_WIDE_INT
      else if (CONST_WIDE_INT_P (scalar_op1))
#else
      else if (CONST_DOUBLE_AS_INT_P (scalar_op1))
#endif
	{
	  int shift = wi::exact_log2 (std::make_pair (scalar_op1, mode));
	  /* Perfect power of 2 (other than 1, which is handled above).  */
	  if (shift > 0)
	    return expand_shift (LSHIFT_EXPR, mode, op0,
				 shift, target, unsignedp);
	  else
	    goto skip_synth;
	}
      else
	goto skip_synth;

      /* We used to test optimize here, on the grounds that it's better to
	 produce a smaller program when -O is not used.  But this causes
	 such a terrible slowdown sometimes that it seems better to always
	 use synth_mult.  */

      /* Special case powers of two.  */
      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff)
	  && !(is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT))
	return expand_shift (LSHIFT_EXPR, mode, op0,
			     floor_log2 (coeff), target, unsignedp);

      fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);

      /* Attempt to handle multiplication of DImode values by negative
	 coefficients, by performing the multiplication by a positive
	 multiplier and then inverting the result.  */
      if (is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT)
	{
	  /* It's safe to use -coeff even for INT_MIN, as the
	     result is interpreted as an unsigned coefficient.
	     Exclude cost of op0 from max_cost to match the cost
	     calculation of the synth_mult.  */
	  coeff = -(unsigned HOST_WIDE_INT) coeff;
	  max_cost = (set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), speed)
		      - neg_cost (speed, mode));
	  if (max_cost <= 0)
	    goto skip_synth;

	  /* Special case powers of two.  */
	  if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
	    {
	      rtx temp = expand_shift (LSHIFT_EXPR, mode, op0,
				       floor_log2 (coeff), target, unsignedp);
	      return expand_unop (mode, neg_optab, temp, target, 0);
	    }

	  if (choose_mult_variant (mode, coeff, &algorithm, &variant,
				   max_cost))
	    {
	      rtx temp = expand_mult_const (mode, op0, coeff, NULL_RTX,
					    &algorithm, variant);
	      return expand_unop (mode, neg_optab, temp, target, 0);
	    }
	  goto skip_synth;
	}

      /* Exclude cost of op0 from max_cost to match the cost
	 calculation of the synth_mult.  */
      max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), speed);
      if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
	return expand_mult_const (mode, op0, coeff, target,
				  &algorithm, variant);
    }
 skip_synth:

  /* Expand x*2.0 as x+x.  */
  if (CONST_DOUBLE_AS_FLOAT_P (scalar_op1))
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, scalar_op1);

      if (REAL_VALUES_EQUAL (d, dconst2))
	{
	  op0 = force_reg (GET_MODE (op0), op0);
	  return expand_binop (mode, add_optab, op0, op0,
			       target, unsignedp, OPTAB_LIB_WIDEN);
	}
    }
 skip_scalar:

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab,
		      op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
  gcc_assert (op0);
  return op0;
}
/* Return a cost estimate for multiplying a register by the given
   COEFFicient in the given MODE and SPEED.  */

int
mult_by_coeff_cost (HOST_WIDE_INT coeff, enum machine_mode mode, bool speed)
{
  int max_cost;
  struct algorithm algorithm;
  enum mult_variant variant;

  rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
  max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, fake_reg), speed);
  if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
    return algorithm.cost.cost;
  else
    return max_cost;
}
/* Perform a widening multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).
   THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
   or smul_widen_optab.

   We check specially for a constant integer as OP1, comparing the
   cost of a widening multiply against the cost of a sequence of shifts
   and adds.  */

rtx
expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
		      int unsignedp, optab this_optab)
{
  bool speed = optimize_insn_for_speed_p ();
  rtx cop1;

  if (CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
				this_optab == umul_widen_optab))
      && CONST_INT_P (cop1)
      && (INTVAL (cop1) >= 0
	  || HWI_COMPUTABLE_MODE_P (mode)))
    {
      HOST_WIDE_INT coeff = INTVAL (cop1);
      int max_cost;
      enum mult_variant variant;
      struct algorithm algorithm;

      /* Special case powers of two.  */
      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
	{
	  op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
	  return expand_shift (LSHIFT_EXPR, mode, op0,
			       floor_log2 (coeff), target, unsignedp);
	}

      /* Exclude cost of op0 from max_cost to match the cost
	 calculation of the synth_mult.  */
      max_cost = mul_widen_cost (speed, mode);
      if (choose_mult_variant (mode, coeff, &algorithm, &variant,
			       max_cost))
	{
	  op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
	  return expand_mult_const (mode, op0, coeff, target,
				    &algorithm, variant);
	}
    }
  return expand_binop (mode, this_optab, op0, op1, target,
		       unsignedp, OPTAB_LIB_WIDEN);
}
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose
   multiplier more freely.

   The rounded-up logarithm of D is placed in *lgup_ptr.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */

unsigned HOST_WIDE_INT
choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
		   unsigned HOST_WIDE_INT *multiplier_ptr,
		   int *post_shift_ptr, int *lgup_ptr)
{
  int lgup, post_shift;
  int pow, pow2;

  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  gcc_assert (lgup <= n);

  pow = n + lgup;
  pow2 = n + lgup - precision;

  /* mlow = 2^(N + lgup)/d */
  wide_int val = wi::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT);
  wide_int mlow = wi::udiv_trunc (val, d);

  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
  val |= wi::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT);
  wide_int mhigh = wi::udiv_trunc (val, d);

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms.  */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      unsigned HOST_WIDE_INT ml_lo = wi::extract_uhwi (mlow, 1,
						       HOST_BITS_PER_WIDE_INT);
      unsigned HOST_WIDE_INT mh_lo = wi::extract_uhwi (mhigh, 1,
						       HOST_BITS_PER_WIDE_INT);
      if (ml_lo >= mh_lo)
	break;

      mlow = wi::uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT);
      mhigh = wi::uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT);
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;

  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
      *multiplier_ptr = mhigh.to_uhwi () & mask;
      return mhigh.to_uhwi () >= mask;
    }
  else
    {
      *multiplier_ptr = mhigh.to_uhwi ();
      return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1);
    }
}
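
/* Worked example: d == 5 with n == precision == 32.  Then lgup == 3,
   mlow == 2^35 / 5 == 0x199999999 and
   mhigh == (2^35 + 2^3) / 5 == 0x19999999B; the reduction loop halves
   both once, leaving post_shift == 2 and the multiplier 0xCCCCCCCD
   (returned msb 0).  Division by 5 then becomes the classic

       x / 5 == (high half of x * 0xCCCCCCCD) >> 2

   for 32-bit unsigned x.  */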
/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */

static unsigned HOST_WIDE_INT
invert_mod2n (unsigned HOST_WIDE_INT x, int n)
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;
  int nbit = 3;

  mask = (n == HOST_BITS_PER_WIDE_INT
	  ? ~(unsigned HOST_WIDE_INT) 0
	  : ((unsigned HOST_WIDE_INT) 1 << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;		/* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
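
/* For example, with x == 7 and n == 8: y starts at 7, which is already
   the inverse of 7 mod 2^3.  One iteration gives
   y == 7 * (2 - 7*7) & 0xff == 183, and 7 * 183 == 1281 == 5 * 256 + 1,
   so 183 is the inverse of 7 mod 2^8; further iterations leave it
   unchanged.  */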
/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
   flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.

   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */

rtx
expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
			     rtx op1, rtx target, int unsignedp)
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
		      GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
  tem = expand_and (mode, tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
		     adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
		      GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
  tem = expand_and (mode, tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
			  target);

  return target;
}
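
/* The adjustment implements the standard identity relating the signed and
   unsigned high halves of an N-bit product, taken mod 2^N:

       high_u (x, y) == high_s (x, y) + (x < 0 ? y : 0) + (y < 0 ? x : 0)

   Each conditional term is formed branchlessly above as (x >> (N-1)) & y,
   using an arithmetic shift to build an all-ones or all-zeros mask.  */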
/* Subroutine of expmed_mult_highpart.  Return the MODE high part of OP.  */

static rtx
extract_high_half (enum machine_mode mode, rtx op)
{
  enum machine_mode wider_mode;

  if (mode == word_mode)
    return gen_highpart (mode, op);

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  op = expand_shift (RSHIFT_EXPR, wider_mode, op,
		     GET_MODE_BITSIZE (mode), 0, 1);
  return convert_modes (mode, wider_mode, op, 0);
}
/* Like expmed_mult_highpart, but only consider using a multiplication
   optab.  OP1 is an rtx for the constant operand.  */

static rtx
expmed_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
			    rtx target, int unsignedp, int max_cost)
{
  rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
  enum machine_mode wider_mode;
  optab moptab;
  rtx tem;
  int size;
  bool speed = optimize_insn_for_speed_p ();

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  size = GET_MODE_BITSIZE (mode);

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost (speed, mode) < max_cost)
    {
      moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	return tem;
    }

  /* Secondly, same as above, but use sign flavor opposite of unsignedp.
     Need to adjust the result after the multiplication.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost (speed, mode)
	  + 2 * shift_cost (speed, mode, size-1)
	  + 4 * add_cost (speed, mode) < max_cost))
    {
      moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	/* We used the wrong signedness.  Adjust the result.  */
	return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					    tem, unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
      && mul_widen_cost (speed, wider_mode) < max_cost)
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
			  unsignedp, OPTAB_WIDEN);
      if (tem)
	return extract_high_half (mode, tem);
    }

  /* Try widening the mode and perform a non-widening multiplication.  */
  if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_cost (speed, wider_mode) + shift_cost (speed, mode, size-1)
	  < max_cost))
    {
      rtx insns, wop0, wop1;

      /* We need to widen the operands, for example to ensure the
	 constant multiplier is correctly sign or zero extended.
	 Use a sequence to clean-up any instructions emitted by
	 the conversions if things don't work out.  */
      start_sequence ();
      wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
      wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
      tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
			  unsignedp, OPTAB_WIDEN);
      insns = get_insns ();
      end_sequence ();

      if (tem)
	{
	  emit_insn (insns);
	  return extract_high_half (mode, tem);
	}
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost (speed, wider_mode)
	  + 2 * shift_cost (speed, mode, size-1)
	  + 4 * add_cost (speed, mode) < max_cost))
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
			  NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
	{
	  tem = extract_high_half (mode, tem);
	  /* We used the wrong signedness.  Adjust the result.  */
	  return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					      target, unsignedp);
	}
    }

  return 0;
}
/* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
   putting the high half of the result in TARGET if that is convenient,
   and return where the result is.  If the operation can not be performed,
   0 is returned.

   MODE is the mode of operation and result.

   UNSIGNEDP nonzero means unsigned multiply.

   MAX_COST is the total allowed cost for the expanded RTL.  */

static rtx
expmed_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
		      rtx target, int unsignedp, int max_cost)
{
  enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
  unsigned HOST_WIDE_INT cnst1;
  int extra_cost;
  bool sign_adjust = false;
  enum mult_variant variant;
  struct algorithm alg;
  rtx tem;
  bool speed = optimize_insn_for_speed_p ();

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
  /* We can't support modes wider than HOST_BITS_PER_INT.  */
  gcc_assert (HWI_COMPUTABLE_MODE_P (mode));

  cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);

  /* We can't optimize modes wider than BITS_PER_WORD.
     ??? We might be able to perform double-word arithmetic if
     mode == word_mode, however all the cost calculations in
     synth_mult etc. assume single-word operations.  */
  if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
    return expmed_mult_highpart_optab (mode, op0, op1, target,
				       unsignedp, max_cost);

  extra_cost = shift_cost (speed, mode, GET_MODE_BITSIZE (mode) - 1);

  /* Check whether we try to multiply by a negative constant.  */
  if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
    {
      sign_adjust = true;
      extra_cost += add_cost (speed, mode);
    }

  /* See whether shift/add multiplication is cheap enough.  */
  if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
			   max_cost - extra_cost))
    {
      /* See whether the specialized multiplication optabs are
	 cheaper than the shift/add version.  */
      tem = expmed_mult_highpart_optab (mode, op0, op1, target, unsignedp,
					alg.cost.cost + extra_cost);
      if (tem)
	return tem;

      tem = convert_to_mode (wider_mode, op0, unsignedp);
      tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
      tem = extract_high_half (mode, tem);

      /* Adjust result for signedness.  */
      if (sign_adjust)
	tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);

      return tem;
    }
  return expmed_mult_highpart_optab (mode, op0, op1, target,
				     unsignedp, max_cost);
}
/* Expand signed modulus of OP0 by a power of two D in mode MODE.  */

static rtx
expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx result, temp, shift, label;
  int logd;
  int prec = GET_MODE_PRECISION (mode);

  logd = floor_log2 (d);
  result = gen_reg_rtx (mode);

  /* Avoid conditional branches when they're expensive.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
      && optimize_insn_for_speed_p ())
    {
      rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
				      mode, 0, -1);
      if (signmask)
	{
	  HOST_WIDE_INT masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
	  signmask = force_reg (mode, signmask);
	  shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);

	  /* Use the rtx_cost of a LSHIFTRT instruction to determine
	     which instruction sequence to use.  If logical right shifts
	     are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
	     use a LSHIFTRT, 1 ADD, 1 SUB and an AND.  */

	  temp = gen_rtx_LSHIFTRT (mode, result, shift);
	  if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
	      || (set_src_cost (temp, optimize_insn_for_speed_p ())
		  > COSTS_N_INSNS (2)))
	    {
	      temp = expand_binop (mode, xor_optab, op0, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, and_optab, temp,
				   gen_int_mode (masklow, mode),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, xor_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	    }
	  else
	    {
	      signmask = expand_binop (mode, lshr_optab, signmask, shift,
				       NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      signmask = force_reg (mode, signmask);

	      temp = expand_binop (mode, add_optab, op0, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, and_optab, temp,
				   gen_int_mode (masklow, mode),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	    }
	  return temp;
	}
    }

  /* Mask contains the mode's signbit and the significant bits of the
     modulus.  By including the signbit in the operation, many targets
     can avoid an explicit compare operation in the following comparison
     against zero.  */
  wide_int mask = wi::mask (logd, false, prec);
  mask = wi::set_bit (mask, prec - 1);

  temp = expand_binop (mode, and_optab, op0,
		       immed_wide_int_const (mask, mode),
		       result, 1, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  label = gen_label_rtx ();
  do_cmp_and_jump (result, const0_rtx, GE, mode, label);

  temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
		       0, OPTAB_LIB_WIDEN);

  mask = wi::mask (logd, true, prec);
  temp = expand_binop (mode, ior_optab, temp,
		       immed_wide_int_const (mask, mode),
		       result, 1, OPTAB_LIB_WIDEN);
  temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
		       0, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  emit_label (label);
  return result;
}
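
/* On a 32-bit target with d == 8 the branch-free sequence above is
   equivalent to

       s = x < 0 ? 7 : 0;
       x % 8 == ((x + s) & 7) - s

   For instance x == -9 gives ((-9 + 7) & 7) - 7 == 6 - 7 == -1, the
   remainder truncated toward zero, while x == 9 gives (9 & 7) - 0 == 1.  */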
/* Expand signed division of OP0 by a power of two D in mode MODE.
   This routine is only called for positive values of D.  */

static rtx
expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx temp, label;
  int logd;

  logd = floor_log2 (d);

  if (d == 2
      && BRANCH_COST (optimize_insn_for_speed_p (),
		      false) >= 1)
    {
      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
			   0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
    }

#ifdef HAVE_conditional_move
  if (BRANCH_COST (optimize_insn_for_speed_p (), false)
      >= 2)
    {
      rtx temp2;

      start_sequence ();
      temp2 = copy_to_mode_reg (mode, op0);
      temp = expand_binop (mode, add_optab, temp2, gen_int_mode (d - 1, mode),
			   NULL_RTX, 0, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);

      /* Construct "temp2 = (temp2 < 0) ? temp : temp2".  */
      temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
				     mode, temp, temp2, mode, 0);
      if (temp2)
	{
	  rtx seq = get_insns ();
	  end_sequence ();
	  emit_insn (seq);
	  return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
	}
      end_sequence ();
    }
#endif

  if (BRANCH_COST (optimize_insn_for_speed_p (),
		   false) >= 2)
    {
      int ushift = GET_MODE_BITSIZE (mode) - logd;

      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
      if (GET_MODE_BITSIZE (mode) >= BITS_PER_WORD
	  || shift_cost (optimize_insn_for_speed_p (), mode, ushift)
	     > COSTS_N_INSNS (1))
	temp = expand_binop (mode, and_optab, temp, gen_int_mode (d - 1, mode),
			     NULL_RTX, 0, OPTAB_LIB_WIDEN);
      else
	temp = expand_shift (RSHIFT_EXPR, mode, temp,
			     ushift, NULL_RTX, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
			   0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
    }

  label = gen_label_rtx ();
  temp = copy_to_mode_reg (mode, op0);
  do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
  expand_inc (temp, gen_int_mode (d - 1, mode));
  emit_label (label);
  return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
}
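/* Illustrative sketch (not part of GCC): the branchless form emitted
   above for BRANCH_COST >= 2, for 32-bit ints.  Hypothetical helper;
   assumes <stdint.h> and arithmetic right shift.  */
static inline int32_t
example_sdiv_pow2 (int32_t x, int logd)
{
  int32_t d_minus_1 = ((int32_t) 1 << logd) - 1;
  /* Add d - 1 only when x is negative, then shift arithmetically, so
     the quotient is truncated toward zero, e.g. -7 / 4 -> -1.  */
  return (x + ((x >> 31) & d_minus_1)) >> logd;
}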
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   left for later.  */

/* ??? For modulo, we don't actually need the highpart of the first product,
   the low part will do nicely.  And for small divisors, the second multiply
   can also be a low-part only multiply or even be completely left out.
   E.g. to calculate the remainder of a division by 3 with a 32 bit
   multiply, multiply with 0x55555556 and extract the upper two bits;
   the result is exact for inputs up to 0x1fffffff.
   The input range can be reduced by using cross-sum rules.
   For odd divisors >= 3, the following table gives right shift counts
   so that if a number is shifted by an integer multiple of the given
   amount, the remainder stays the same:
   2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
   14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
   0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
   20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
   0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12.

   Cross-sum rules for even numbers can be derived by leaving as many bits
   to the right alone as the divisor has zeros to the right.
   E.g. if x is an unsigned 32 bit number:
   (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
   */
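/* Illustrative sketch (not part of GCC) of the remainder-by-3 trick the
   comment above describes: one 32-bit multiply by 0x55555556 and the top
   two bits of the product give x mod 3, exactly for x <= 0x1fffffff.
   Hypothetical helper, assuming <stdint.h>.  */
static inline uint32_t
example_mod3 (uint32_t x)
{
  /* 0x55555556 == (2^32 + 2) / 3; for x = 3k + r the low 32 bits of the
     product are 2k + r * 0x55555556, whose top two bits are exactly r
     while 2k stays below bit 30 for x in the stated range.  */
  return (x * 0x55555556u) >> 30;
}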
rtx
expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
	       rtx op0, rtx op1, rtx target, int unsignedp)
{
  enum machine_mode compute_mode;
  rtx tquotient;
  rtx quotient = 0, remainder = 0;
  rtx last;
  int size;
  rtx insn;
  optab optab1, optab2;
  int op1_is_constant, op1_is_pow2 = 0;
  int max_cost, extra_cost;
  static HOST_WIDE_INT last_div_const = 0;
  bool speed = optimize_insn_for_speed_p ();

  op1_is_constant = CONST_INT_P (op1);
  if (op1_is_constant)
    {
      unsigned HOST_WIDE_INT ext_op1 = UINTVAL (op1);
      if (unsignedp)
	ext_op1 &= GET_MODE_MASK (mode);
      op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
		     || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
    }

  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding mode.
     For some special operands this code emits all RTL for the desired
     operation, for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
     by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expmed_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */
  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;

  /* When dividing by -1, we could get an overflow.
     negv_optab can handle overflows.  */
  if (! unsignedp && op1 == constm1_rtx)
    {
      if (rem_flag)
	return const0_rtx;
      return expand_unop (mode, flag_trapv && GET_MODE_CLASS (mode) == MODE_INT
			  ? negv_optab : neg_optab, op0, target, 0);
    }

  if (target
      /* Don't use the function value register as a target
	 since we have to read it as well as write it,
	 and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
	  /* Don't clobber an operand while doing a multi-step calculation.  */
	  || ((rem_flag || op1_is_constant)
	      && (reg_mentioned_p (target, op0)
		  || (MEM_P (op0) && MEM_P (target))))
	  || reg_mentioned_p (target, op1)
	  || (MEM_P (op1) && MEM_P (target))))
    target = 0;
  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions as modxx3 insns and that these insns don't normally
     fail.  If these assumptions are not correct, we may generate less
     efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expmed_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */
  optab1 = ((op1_is_pow2 && op1 != const0_rtx)
	    ? (unsignedp ? lshr_optab : ashr_optab)
	    : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = ((op1_is_pow2 && op1 != const0_rtx)
	    ? optab1
	    : (unsignedp ? udivmod_optab : sdivmod_optab));

  for (compute_mode = mode; compute_mode != VOIDmode;
       compute_mode = GET_MODE_WIDER_MODE (compute_mode))
    if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
	|| optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    for (compute_mode = mode; compute_mode != VOIDmode;
	 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
      if (optab_libfunc (optab1, compute_mode)
	  || optab_libfunc (optab2, compute_mode))
	break;

  /* If we still couldn't find a mode, use MODE, but expand_binop will
     probably die.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;

  if (target && GET_MODE (target) == compute_mode)
    tquotient = target;
  else
    tquotient = gen_reg_rtx (compute_mode);

  size = GET_MODE_BITSIZE (compute_mode);
#if 0
  /* It should be possible to restrict the precision to GET_MODE_BITSIZE
     (mode), and thereby get better code when OP1 is a constant.  Do that
     later.  It will require going over all usages of SIZE below.  */
  size = GET_MODE_BITSIZE (mode);
#endif
  /* Only deduct something for a REM if the last divide done was
     for a different constant.  Then set the constant of the last
     divide.  */
  max_cost = (unsignedp
	      ? udiv_cost (speed, compute_mode)
	      : sdiv_cost (speed, compute_mode));
  if (rem_flag && ! (last_div_const != 0 && op1_is_constant
		     && INTVAL (op1) == last_div_const))
    max_cost -= (mul_cost (speed, compute_mode)
		 + add_cost (speed, compute_mode));

  last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;

  /* Now convert to the best mode to use.  */
  if (compute_mode != mode)
    {
      op0 = convert_modes (compute_mode, mode, op0, unsignedp);
      op1 = convert_modes (compute_mode, mode, op1, unsignedp);

      /* convert_modes may have placed op1 into a register, so we
	 must recompute the following.  */
      op1_is_constant = CONST_INT_P (op1);
      op1_is_pow2 = (op1_is_constant
		     && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
			  || (! unsignedp
			      && EXACT_POWER_OF_2_OR_ZERO_P (-UINTVAL (op1))))));
    }
  /* If one of the operands is a volatile MEM, copy it into a register.  */

  if (MEM_P (op0) && MEM_VOLATILE_P (op0))
    op0 = force_reg (compute_mode, op0);
  if (MEM_P (op1) && MEM_VOLATILE_P (op1))
    op1 = force_reg (compute_mode, op1);

  /* If we need the remainder or if OP1 is constant, we need to
     put OP0 in a register in case it has any queued subexpressions.  */
  if (rem_flag || op1_is_constant)
    op0 = force_reg (compute_mode, op0);

  last = get_last_insn ();

  /* Promote floor rounding to trunc rounding for unsigned operations.  */
  if (unsignedp)
    {
      if (code == FLOOR_DIV_EXPR)
	code = TRUNC_DIV_EXPR;
      if (code == FLOOR_MOD_EXPR)
	code = TRUNC_MOD_EXPR;
      if (code == EXACT_DIV_EXPR && op1_is_pow2)
	code = TRUNC_DIV_EXPR;
    }

  if (op1 != const0_rtx)
    switch (code)
      {
      case TRUNC_MOD_EXPR:
      case TRUNC_DIV_EXPR:
	if (op1_is_constant)
	  {
	    if (unsignedp)
	      {
		unsigned HOST_WIDE_INT mh, ml;
		int pre_shift, post_shift;
		int dummy;
		unsigned HOST_WIDE_INT d = (INTVAL (op1)
					    & GET_MODE_MASK (compute_mode));

		if (EXACT_POWER_OF_2_OR_ZERO_P (d))
		  {
		    pre_shift = floor_log2 (d);
		    if (rem_flag)
		      {
			unsigned HOST_WIDE_INT mask
			  = ((unsigned HOST_WIDE_INT) 1 << pre_shift) - 1;
			remainder
			  = expand_binop (compute_mode, and_optab, op0,
					  gen_int_mode (mask, compute_mode),
					  remainder, 1, OPTAB_LIB_WIDEN);
			if (remainder)
			  return gen_lowpart (mode, remainder);
		      }
		    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
					     pre_shift, tquotient, 1);
		  }
		else if (size <= HOST_BITS_PER_WIDE_INT)
		  {
		    if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
		      {
			/* Most significant bit of divisor is set; emit an scc
			   insn.  */
			quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
							  compute_mode, 1, 1);
		      }
		    else
		      {
			/* Find a suitable multiplier and right shift count
			   instead of multiplying with D.  */

			mh = choose_multiplier (d, size, size,
						&ml, &post_shift, &dummy);

			/* If the suggested multiplier is more than SIZE bits,
			   we can do better for even divisors, using an
			   initial right shift.  */
			if (mh != 0 && (d & 1) == 0)
			  {
			    pre_shift = floor_log2 (d & -d);
			    mh = choose_multiplier (d >> pre_shift, size,
						    size - pre_shift,
						    &ml, &post_shift, &dummy);
			    gcc_assert (!mh);
			  }
			else
			  pre_shift = 0;

			if (mh != 0)
			  {
			    rtx t1, t2, t3, t4;

			    if (post_shift - 1 >= BITS_PER_WORD)
			      goto fail1;

			    extra_cost
			      = (shift_cost (speed, compute_mode, post_shift - 1)
				 + shift_cost (speed, compute_mode, 1)
				 + 2 * add_cost (speed, compute_mode));
			    t1 = expmed_mult_highpart
			      (compute_mode, op0,
			       gen_int_mode (ml, compute_mode),
			       NULL_RTX, 1, max_cost - extra_cost);
			    if (t1 == 0)
			      goto fail1;
			    t2 = force_operand (gen_rtx_MINUS (compute_mode,
							       op0, t1),
						NULL_RTX);
			    t3 = expand_shift (RSHIFT_EXPR, compute_mode,
					       t2, 1, NULL_RTX, 1);
			    t4 = force_operand (gen_rtx_PLUS (compute_mode,
							      t1, t3),
						NULL_RTX);
			    quotient = expand_shift
			      (RSHIFT_EXPR, compute_mode, t4,
			       post_shift - 1, tquotient, 1);
			  }
			else
			  {
			    rtx t1, t2;

			    if (pre_shift >= BITS_PER_WORD
				|| post_shift >= BITS_PER_WORD)
			      goto fail1;

			    t1 = expand_shift
			      (RSHIFT_EXPR, compute_mode, op0,
			       pre_shift, NULL_RTX, 1);
			    extra_cost
			      = (shift_cost (speed, compute_mode, pre_shift)
				 + shift_cost (speed, compute_mode, post_shift));
			    t2 = expmed_mult_highpart
			      (compute_mode, t1,
			       gen_int_mode (ml, compute_mode),
			       NULL_RTX, 1, max_cost - extra_cost);
			    if (t2 == 0)
			      goto fail1;
			    quotient = expand_shift
			      (RSHIFT_EXPR, compute_mode, t2,
			       post_shift, tquotient, 1);
			  }
		      }
		  }
		else		/* Too wide mode to use tricky code */
		  break;

		insn = get_last_insn ();
		if (insn != last)
		  set_dst_reg_note (insn, REG_EQUAL,
				    gen_rtx_UDIV (compute_mode, op0, op1),
				    quotient);
	      }
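	    /* Illustrative sketch (not part of GCC): the t1..t4 sequence
	       above, instantiated for unsigned 32-bit division by 7.
	       choose_multiplier yields multiplier 0x24924925 with
	       post_shift 3; the multiplier needs 33 bits, so the
	       subtract/halve/add steps recover the lost top bit
	       (assuming <stdint.h>):

		 uint32_t t1 = (uint32_t) (((uint64_t) n * 0x24924925u) >> 32);
		 uint32_t t2 = n - t1;        - cannot wrap, since t1 <= n
		 uint32_t t3 = t2 >> 1;
		 uint32_t t4 = t1 + t3;
		 uint32_t q  = t4 >> 2;       - post_shift - 1

	       e.g. n = 0xffffffff gives q = 613566756 = n / 7.  */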
	    else		/* TRUNC_DIV, signed */
	      {
		unsigned HOST_WIDE_INT ml;
		int lgup, post_shift;
		rtx mlr;
		HOST_WIDE_INT d = INTVAL (op1);
		unsigned HOST_WIDE_INT abs_d;

		/* Since d might be INT_MIN, we have to cast to
		   unsigned HOST_WIDE_INT before negating to avoid
		   undefined signed overflow.  */
		abs_d = (d >= 0
			 ? (unsigned HOST_WIDE_INT) d
			 : - (unsigned HOST_WIDE_INT) d);

		/* n rem d = n rem -d */
		if (rem_flag && d < 0)
		  {
		    d = abs_d;
		    op1 = gen_int_mode (abs_d, compute_mode);
		  }

		if (d == 1)
		  quotient = op0;
		else if (d == -1)
		  quotient = expand_unop (compute_mode, neg_optab, op0,
					  tquotient, 0);
		else if (HOST_BITS_PER_WIDE_INT >= size
			 && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
		  {
		    /* This case is not handled correctly below.  */
		    quotient = emit_store_flag (tquotient, EQ, op0, op1,
						compute_mode, 1, 1);
		    if (quotient == 0)
		      goto fail1;
		  }
		else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
			 && (rem_flag
			     ? smod_pow2_cheap (speed, compute_mode)
			     : sdiv_pow2_cheap (speed, compute_mode))
			 /* We assume that cheap metric is true if the
			    optab has an expander for this mode.  */
			 && ((optab_handler ((rem_flag ? smod_optab
					      : sdiv_optab),
					     compute_mode)
			      != CODE_FOR_nothing)
			     || (optab_handler (sdivmod_optab,
						compute_mode)
				 != CODE_FOR_nothing)))
		  ;
		else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
		  {
		    if (rem_flag)
		      {
			remainder = expand_smod_pow2 (compute_mode, op0, d);
			if (remainder)
			  return gen_lowpart (mode, remainder);
		      }

		    if (sdiv_pow2_cheap (speed, compute_mode)
			&& ((optab_handler (sdiv_optab, compute_mode)
			     != CODE_FOR_nothing)
			    || (optab_handler (sdivmod_optab, compute_mode)
				!= CODE_FOR_nothing)))
		      quotient = expand_divmod (0, TRUNC_DIV_EXPR,
						compute_mode, op0,
						gen_int_mode (abs_d,
							      compute_mode),
						NULL_RTX, 0);
		    else
		      quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);

		    /* We have computed OP0 / abs(OP1).  If OP1 is negative,
		       negate the quotient.  */
		    if (d < 0)
		      {
			insn = get_last_insn ();
			if (insn != last
			    && abs_d < ((unsigned HOST_WIDE_INT) 1
					<< (HOST_BITS_PER_WIDE_INT - 1)))
			  set_dst_reg_note (insn, REG_EQUAL,
					    gen_rtx_DIV (compute_mode, op0,
							 gen_int_mode
							   (abs_d,
							    compute_mode)),
					    quotient);

			quotient = expand_unop (compute_mode, neg_optab,
						quotient, quotient, 0);
		      }
		  }
		else if (size <= HOST_BITS_PER_WIDE_INT)
		  {
		    choose_multiplier (abs_d, size, size - 1,
				       &ml, &post_shift, &lgup);
		    if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
		      {
			rtx t1, t2, t3;

			if (post_shift >= BITS_PER_WORD
			    || size - 1 >= BITS_PER_WORD)
			  goto fail1;

			extra_cost = (shift_cost (speed, compute_mode, post_shift)
				      + shift_cost (speed, compute_mode, size - 1)
				      + add_cost (speed, compute_mode));
			t1 = expmed_mult_highpart
			  (compute_mode, op0, gen_int_mode (ml, compute_mode),
			   NULL_RTX, 0, max_cost - extra_cost);
			if (t1 == 0)
			  goto fail1;
			t2 = expand_shift
			  (RSHIFT_EXPR, compute_mode, t1,
			   post_shift, NULL_RTX, 0);
			t3 = expand_shift
			  (RSHIFT_EXPR, compute_mode, op0,
			   size - 1, NULL_RTX, 0);
			if (d < 0)
			  quotient
			    = force_operand (gen_rtx_MINUS (compute_mode,
							    t3, t2),
					     tquotient);
			else
			  quotient
			    = force_operand (gen_rtx_MINUS (compute_mode,
							    t2, t3),
					     tquotient);
		      }
		    else
		      {
			rtx t1, t2, t3, t4;

			if (post_shift >= BITS_PER_WORD
			    || size - 1 >= BITS_PER_WORD)
			  goto fail1;

			ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
			mlr = gen_int_mode (ml, compute_mode);
			extra_cost = (shift_cost (speed, compute_mode, post_shift)
				      + shift_cost (speed, compute_mode, size - 1)
				      + 2 * add_cost (speed, compute_mode));
			t1 = expmed_mult_highpart (compute_mode, op0, mlr,
						   NULL_RTX, 0,
						   max_cost - extra_cost);
			if (t1 == 0)
			  goto fail1;
			t2 = force_operand (gen_rtx_PLUS (compute_mode,
							  t1, op0),
					    NULL_RTX);
			t3 = expand_shift
			  (RSHIFT_EXPR, compute_mode, t2,
			   post_shift, NULL_RTX, 0);
			t4 = expand_shift
			  (RSHIFT_EXPR, compute_mode, op0,
			   size - 1, NULL_RTX, 0);
			if (d < 0)
			  quotient
			    = force_operand (gen_rtx_MINUS (compute_mode,
							    t4, t3),
					     tquotient);
			else
			  quotient
			    = force_operand (gen_rtx_MINUS (compute_mode,
							    t3, t4),
					     tquotient);
		      }
		  }
		else		/* Too wide mode to use tricky code */
		  break;

		insn = get_last_insn ();
		if (insn != last)
		  set_dst_reg_note (insn, REG_EQUAL,
				    gen_rtx_DIV (compute_mode, op0, op1),
				    quotient);
	      }
	  break;

	fail1:
	  delete_insns_since (last);
	  break;
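	  /* Illustrative sketch (not part of GCC): the signed analogue
	     for division by 7 in 32 bits.  Here choose_multiplier gives
	     a multiplier >= 2^31 (0x92492493, i.e. -1840700269 signed)
	     with post_shift 2, so op0 is added back after the high-part
	     multiply and the sign bit supplies the final fixup
	     (assuming <stdint.h>):

	       int32_t t1 = (int32_t) (((int64_t) n * -1840700269LL) >> 32);
	       t1 += n;                        - add back op0
	       int32_t q = t1 >> 2;            - post_shift, arithmetic
	       q += (uint32_t) n >> 31;        - +1 for negative n

	     e.g. n = -7 gives q = -1, and n = 6 gives q = 0.  */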
	case FLOOR_DIV_EXPR:
	case FLOOR_MOD_EXPR:
	  /* We will come here only for signed operations.  */
	  if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
	    {
	      unsigned HOST_WIDE_INT mh, ml;
	      int pre_shift, lgup, post_shift;
	      HOST_WIDE_INT d = INTVAL (op1);

	      if (d > 0)
		{
		  /* We could just as easily deal with negative constants here,
		     but it does not seem worth the trouble for GCC 2.6.  */
		  if (EXACT_POWER_OF_2_OR_ZERO_P (d))
		    {
		      pre_shift = floor_log2 (d);
		      if (rem_flag)
			{
			  unsigned HOST_WIDE_INT mask
			    = ((unsigned HOST_WIDE_INT) 1 << pre_shift) - 1;
			  remainder = expand_binop
			    (compute_mode, and_optab, op0,
			     gen_int_mode (mask, compute_mode),
			     remainder, 0, OPTAB_LIB_WIDEN);
			  if (remainder)
			    return gen_lowpart (mode, remainder);
			}
		      quotient = expand_shift
			(RSHIFT_EXPR, compute_mode, op0,
			 pre_shift, tquotient, 0);
		    }
		  else
		    {
		      rtx t1, t2, t3, t4;

		      mh = choose_multiplier (d, size, size - 1,
					      &ml, &post_shift, &lgup);
		      gcc_assert (!mh);

		      if (post_shift < BITS_PER_WORD
			  && size - 1 < BITS_PER_WORD)
			{
			  t1 = expand_shift
			    (RSHIFT_EXPR, compute_mode, op0,
			     size - 1, NULL_RTX, 0);
			  t2 = expand_binop (compute_mode, xor_optab, op0, t1,
					     NULL_RTX, 0, OPTAB_WIDEN);
			  extra_cost = (shift_cost (speed, compute_mode, post_shift)
					+ shift_cost (speed, compute_mode, size - 1)
					+ 2 * add_cost (speed, compute_mode));
			  t3 = expmed_mult_highpart
			    (compute_mode, t2, gen_int_mode (ml, compute_mode),
			     NULL_RTX, 1, max_cost - extra_cost);
			  if (t3 != 0)
			    {
			      t4 = expand_shift
				(RSHIFT_EXPR, compute_mode, t3,
				 post_shift, NULL_RTX, 1);
			      quotient = expand_binop (compute_mode, xor_optab,
						       t4, t1, tquotient, 0,
						       OPTAB_WIDEN);
			    }
			}
		    }
		}
	      else
		{
		  rtx nsign, t1, t2, t3, t4;
		  t1 = force_operand (gen_rtx_PLUS (compute_mode,
						    op0, constm1_rtx), NULL_RTX);
		  t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
				     0, OPTAB_WIDEN);
		  nsign = expand_shift
		    (RSHIFT_EXPR, compute_mode, t2,
		     size - 1, NULL_RTX, 0);
		  t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
				      NULL_RTX);
		  t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
				      NULL_RTX, 0);
		  if (t4)
		    {
		      rtx t5;
		      t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
					NULL_RTX, 0);
		      quotient = force_operand (gen_rtx_PLUS (compute_mode,
							      t4, t5),
						tquotient);
		    }
		}
	    }

	  if (quotient != 0)
	    break;
	  delete_insns_since (last);
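	  /* Illustrative sketch (not part of GCC): the XOR trick used
	     above turns a truncating division into a floor division for
	     a positive divisor, because x ^ (x >> 31) is ~x = -x - 1 for
	     negative x (assuming <stdint.h> and arithmetic shifts):

	       int32_t sign = n >> 31;            - -1 if n < 0, else 0
	       int32_t q = ((n ^ sign) / d) ^ sign;

	     e.g. n = -7, d = 3: (6 / 3) ^ -1 = -3 = floor(-7 / 3).  */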
	  /* Try using an instruction that produces both the quotient and
	     remainder, using truncation.  We can easily compensate the quotient
	     or remainder to get floor rounding, once we have the remainder.
	     Notice that we compute also the final remainder value here,
	     and return the result right away.  */
	  if (target == 0 || GET_MODE (target) != compute_mode)
	    target = gen_reg_rtx (compute_mode);

	  if (rem_flag)
	    {
	      remainder
		= REG_P (target) ? target : gen_reg_rtx (compute_mode);
	      quotient = gen_reg_rtx (compute_mode);
	    }
	  else
	    {
	      quotient
		= REG_P (target) ? target : gen_reg_rtx (compute_mode);
	      remainder = gen_reg_rtx (compute_mode);
	    }

	  if (expand_twoval_binop (sdivmod_optab, op0, op1,
				   quotient, remainder, 0))
	    {
	      /* This could be computed with a branch-less sequence.
		 Save that for later.  */
	      rtx tem;
	      rtx label = gen_label_rtx ();
	      do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
	      tem = expand_binop (compute_mode, xor_optab, op0, op1,
				  NULL_RTX, 0, OPTAB_WIDEN);
	      do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
	      expand_dec (quotient, const1_rtx);
	      expand_inc (remainder, op1);
	      emit_label (label);
	      return gen_lowpart (mode, rem_flag ? remainder : quotient);
	    }

	  /* No luck with division elimination or divmod.  Have to do it
	     by conditionally adjusting op0 *and* the result.  */
	  {
	    rtx label1, label2, label3, label4, label5;
	    rtx adjusted_op0;
	    rtx tem;

	    quotient = gen_reg_rtx (compute_mode);
	    adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
	    label1 = gen_label_rtx ();
	    label2 = gen_label_rtx ();
	    label3 = gen_label_rtx ();
	    label4 = gen_label_rtx ();
	    label5 = gen_label_rtx ();
	    do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
	    do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
	    tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
				quotient, 0, OPTAB_LIB_WIDEN);
	    if (tem != quotient)
	      emit_move_insn (quotient, tem);
	    emit_jump_insn (gen_jump (label5));
	    emit_barrier ();
	    emit_label (label1);
	    expand_inc (adjusted_op0, const1_rtx);
	    emit_jump_insn (gen_jump (label4));
	    emit_barrier ();
	    emit_label (label2);
	    do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
	    tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
				quotient, 0, OPTAB_LIB_WIDEN);
	    if (tem != quotient)
	      emit_move_insn (quotient, tem);
	    emit_jump_insn (gen_jump (label5));
	    emit_barrier ();
	    emit_label (label3);
	    expand_dec (adjusted_op0, const1_rtx);
	    emit_label (label4);
	    tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
				quotient, 0, OPTAB_LIB_WIDEN);
	    if (tem != quotient)
	      emit_move_insn (quotient, tem);
	    expand_dec (quotient, const1_rtx);
	    emit_label (label5);
	  }
	  break;
	case CEIL_DIV_EXPR:
	case CEIL_MOD_EXPR:
	  if (unsignedp)
	    {
	      if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
		{
		  rtx t1, t2, t3;
		  unsigned HOST_WIDE_INT d = INTVAL (op1);
		  t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
				     floor_log2 (d), tquotient, 1);
		  t2 = expand_binop (compute_mode, and_optab, op0,
				     gen_int_mode (d - 1, compute_mode),
				     NULL_RTX, 1, OPTAB_LIB_WIDEN);
		  t3 = gen_reg_rtx (compute_mode);
		  t3 = emit_store_flag (t3, NE, t2, const0_rtx,
					compute_mode, 1, 1);
		  if (t3 == 0)
		    {
		      rtx lab;
		      lab = gen_label_rtx ();
		      do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
		      expand_inc (t1, const1_rtx);
		      emit_label (lab);
		      quotient = t1;
		    }
		  else
		    quotient = force_operand (gen_rtx_PLUS (compute_mode,
							    t1, t3),
					      tquotient);
		  break;
		}

	      /* Try using an instruction that produces both the quotient and
		 remainder, using truncation.  We can easily compensate the
		 quotient or remainder to get ceiling rounding, once we have the
		 remainder.  Notice that we compute also the final remainder
		 value here, and return the result right away.  */
	      if (target == 0 || GET_MODE (target) != compute_mode)
		target = gen_reg_rtx (compute_mode);

	      if (rem_flag)
		{
		  remainder = (REG_P (target)
			       ? target : gen_reg_rtx (compute_mode));
		  quotient = gen_reg_rtx (compute_mode);
		}
	      else
		{
		  quotient = (REG_P (target)
			      ? target : gen_reg_rtx (compute_mode));
		  remainder = gen_reg_rtx (compute_mode);
		}

	      if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
				       remainder, 1))
		{
		  /* This could be computed with a branch-less sequence.
		     Save that for later.  */
		  rtx label = gen_label_rtx ();
		  do_cmp_and_jump (remainder, const0_rtx, EQ,
				   compute_mode, label);
		  expand_inc (quotient, const1_rtx);
		  expand_dec (remainder, op1);
		  emit_label (label);
		  return gen_lowpart (mode, rem_flag ? remainder : quotient);
		}

	      /* No luck with division elimination or divmod.  Have to do it
		 by conditionally adjusting op0 *and* the result.  */
	      {
		rtx label1, label2;
		rtx adjusted_op0, tem;

		quotient = gen_reg_rtx (compute_mode);
		adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
		label1 = gen_label_rtx ();
		label2 = gen_label_rtx ();
		do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
				 compute_mode, label1);
		emit_move_insn (quotient, const0_rtx);
		emit_jump_insn (gen_jump (label2));
		emit_barrier ();
		emit_label (label1);
		expand_dec (adjusted_op0, const1_rtx);
		tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
				    quotient, 1, OPTAB_LIB_WIDEN);
		if (tem != quotient)
		  emit_move_insn (quotient, tem);
		expand_inc (quotient, const1_rtx);
		emit_label (label2);
	      }
	    }
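	  /* Illustrative sketch (not part of GCC): at the C level the
	     unsigned ceiling division computed above is just the
	     truncating quotient plus one when any remainder survives:

	       uint32_t q = n / d + (n % d != 0);   - requires d != 0

	     which is what the expand_inc on the nonzero-remainder path
	     amounts to.  */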
	  else /* signed */
	    {
	      if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
		  && INTVAL (op1) >= 0)
		{
		  /* This is extremely similar to the code for the unsigned case
		     above.  For 2.7 we should merge these variants, but for
		     2.6.1 I don't want to touch the code for unsigned since that
		     gets used in C.  The signed case will only be used by other
		     parts of the compiler.  */

		  rtx t1, t2, t3;
		  unsigned HOST_WIDE_INT d = INTVAL (op1);
		  t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
				     floor_log2 (d), tquotient, 0);
		  t2 = expand_binop (compute_mode, and_optab, op0,
				     gen_int_mode (d - 1, compute_mode),
				     NULL_RTX, 1, OPTAB_LIB_WIDEN);
		  t3 = gen_reg_rtx (compute_mode);
		  t3 = emit_store_flag (t3, NE, t2, const0_rtx,
					compute_mode, 1, 1);
		  if (t3 == 0)
		    {
		      rtx lab;
		      lab = gen_label_rtx ();
		      do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
		      expand_inc (t1, const1_rtx);
		      emit_label (lab);
		      quotient = t1;
		    }
		  else
		    quotient = force_operand (gen_rtx_PLUS (compute_mode,
							    t1, t3),
					      tquotient);
		  break;
		}

	      /* Try using an instruction that produces both the quotient and
		 remainder, using truncation.  We can easily compensate the
		 quotient or remainder to get ceiling rounding, once we have the
		 remainder.  Notice that we compute also the final remainder
		 value here, and return the result right away.  */
	      if (target == 0 || GET_MODE (target) != compute_mode)
		target = gen_reg_rtx (compute_mode);

	      if (rem_flag)
		{
		  remainder = (REG_P (target)
			       ? target : gen_reg_rtx (compute_mode));
		  quotient = gen_reg_rtx (compute_mode);
		}
	      else
		{
		  quotient = (REG_P (target)
			      ? target : gen_reg_rtx (compute_mode));
		  remainder = gen_reg_rtx (compute_mode);
		}

	      if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
				       remainder, 0))
		{
		  /* This could be computed with a branch-less sequence.
		     Save that for later.  */
		  rtx tem;
		  rtx label = gen_label_rtx ();
		  do_cmp_and_jump (remainder, const0_rtx, EQ,
				   compute_mode, label);
		  tem = expand_binop (compute_mode, xor_optab, op0, op1,
				      NULL_RTX, 0, OPTAB_WIDEN);
		  do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
		  expand_inc (quotient, const1_rtx);
		  expand_dec (remainder, op1);
		  emit_label (label);
		  return gen_lowpart (mode, rem_flag ? remainder : quotient);
		}

	      /* No luck with division elimination or divmod.  Have to do it
		 by conditionally adjusting op0 *and* the result.  */
	      {
		rtx label1, label2, label3, label4, label5;
		rtx adjusted_op0;
		rtx tem;

		quotient = gen_reg_rtx (compute_mode);
		adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
		label1 = gen_label_rtx ();
		label2 = gen_label_rtx ();
		label3 = gen_label_rtx ();
		label4 = gen_label_rtx ();
		label5 = gen_label_rtx ();
		do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
		do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
				 compute_mode, label1);
		tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
				    quotient, 0, OPTAB_LIB_WIDEN);
		if (tem != quotient)
		  emit_move_insn (quotient, tem);
		emit_jump_insn (gen_jump (label5));
		emit_barrier ();
		emit_label (label1);
		expand_dec (adjusted_op0, const1_rtx);
		emit_jump_insn (gen_jump (label4));
		emit_barrier ();
		emit_label (label2);
		do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
				 compute_mode, label3);
		tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
				    quotient, 0, OPTAB_LIB_WIDEN);
		if (tem != quotient)
		  emit_move_insn (quotient, tem);
		emit_jump_insn (gen_jump (label5));
		emit_barrier ();
		emit_label (label3);
		expand_inc (adjusted_op0, const1_rtx);
		emit_label (label4);
		tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
				    quotient, 0, OPTAB_LIB_WIDEN);
		if (tem != quotient)
		  emit_move_insn (quotient, tem);
		expand_inc (quotient, const1_rtx);
		emit_label (label5);
	      }
	    }
	  break;
	case EXACT_DIV_EXPR:
	  if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
	    {
	      HOST_WIDE_INT d = INTVAL (op1);
	      unsigned HOST_WIDE_INT ml;
	      int pre_shift;
	      rtx t1;

	      pre_shift = floor_log2 (d & -d);
	      ml = invert_mod2n (d >> pre_shift, size);
	      t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
				 pre_shift, NULL_RTX, unsignedp);
	      quotient = expand_mult (compute_mode, t1,
				      gen_int_mode (ml, compute_mode),
				      NULL_RTX, 1);

	      insn = get_last_insn ();
	      set_dst_reg_note (insn, REG_EQUAL,
				gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
						compute_mode, op0, op1),
				quotient);
	    }
	  break;
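	  /* Illustrative sketch (not part of GCC): for an odd divisor,
	     invert_mod2n can be mimicked by a Newton iteration whose
	     number of correct low-order bits doubles each step; when d
	     divides n exactly, one plain multiply then yields n / d
	     (assuming <stdint.h>):

	       uint32_t inv = d;           - correct mod 2^3 for odd d
	       inv *= 2 - d * inv;         - mod 2^6
	       inv *= 2 - d * inv;         - mod 2^12
	       inv *= 2 - d * inv;         - mod 2^24
	       inv *= 2 - d * inv;         - mod 2^48 >= 2^32
	       uint32_t q = n * inv;       - exact when d divides n

	     Even divisors are first shifted right by their trailing zero
	     count, as the pre_shift above does.  */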
	case ROUND_DIV_EXPR:
	case ROUND_MOD_EXPR:
	  if (unsignedp)
	    {
	      rtx tem;
	      rtx label;
	      label = gen_label_rtx ();
	      quotient = gen_reg_rtx (compute_mode);
	      remainder = gen_reg_rtx (compute_mode);
	      if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
				       remainder, 1) == 0)
		{
		  rtx tem;
		  quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
					   quotient, 1, OPTAB_LIB_WIDEN);
		  tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
		  remainder = expand_binop (compute_mode, sub_optab, op0, tem,
					    remainder, 1, OPTAB_LIB_WIDEN);
		}
	      tem = plus_constant (compute_mode, op1, -1);
	      tem = expand_shift (RSHIFT_EXPR, compute_mode, tem, 1, NULL_RTX, 1);
	      do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
	      expand_inc (quotient, const1_rtx);
	      expand_dec (remainder, op1);
	      emit_label (label);
	    }
	  else
	    {
	      rtx abs_rem, abs_op1, tem, mask;
	      rtx label;
	      label = gen_label_rtx ();
	      quotient = gen_reg_rtx (compute_mode);
	      remainder = gen_reg_rtx (compute_mode);
	      if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
				       remainder, 0) == 0)
		{
		  rtx tem;
		  quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
					   quotient, 0, OPTAB_LIB_WIDEN);
		  tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
		  remainder = expand_binop (compute_mode, sub_optab, op0, tem,
					    remainder, 0, OPTAB_LIB_WIDEN);
		}
	      abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
	      abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
	      tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
				  1, NULL_RTX, 1);
	      do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
	      tem = expand_binop (compute_mode, xor_optab, op0, op1,
				  NULL_RTX, 0, OPTAB_WIDEN);
	      mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
				   size - 1, NULL_RTX, 0);
	      tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
				  NULL_RTX, 0, OPTAB_WIDEN);
	      tem = expand_binop (compute_mode, sub_optab, tem, mask,
				  NULL_RTX, 0, OPTAB_WIDEN);
	      expand_inc (quotient, tem);
	      tem = expand_binop (compute_mode, xor_optab, mask, op1,
				  NULL_RTX, 0, OPTAB_WIDEN);
	      tem = expand_binop (compute_mode, sub_optab, tem, mask,
				  NULL_RTX, 0, OPTAB_WIDEN);
	      expand_dec (remainder, tem);
	      emit_label (label);
	    }
	  return gen_lowpart (mode, rem_flag ? remainder : quotient);
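	  /* Illustrative sketch (not part of GCC): the unsigned branch
	     above rounds to nearest by bumping the truncated quotient
	     whenever the remainder exceeds (d - 1) / 2 -- the LEU test
	     against (op1 - 1) >> 1:

	       uint32_t q = n / d, r = n % d;       - requires d != 0
	       q += r > (d - 1) / 2;                - round to nearest

	     e.g. n = 6, d = 4 gives q = 2.  */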
	default:
	  gcc_unreachable ();
	}

  if (quotient == 0)
    {
      if (target && GET_MODE (target) != compute_mode)
	target = 0;

      if (rem_flag)
	{
	  /* Try to produce the remainder without producing the quotient.
	     If we seem to have a divmod pattern that does not require widening,
	     don't try widening here.  We should really have a WIDEN argument
	     to expand_twoval_binop, since what we'd really like to do here is
	     1) try a mod insn in compute_mode
	     2) try a divmod insn in compute_mode
	     3) try a div insn in compute_mode and multiply-subtract to get
		remainder
	     4) try the same things with widening allowed.  */
	  remainder
	    = sign_expand_binop (compute_mode, umod_optab, smod_optab,
				 op0, op1, target,
				 unsignedp,
				 ((optab_handler (optab2, compute_mode)
				   != CODE_FOR_nothing)
				  ? OPTAB_DIRECT : OPTAB_WIDEN));
	  if (remainder == 0)
	    {
	      /* No luck there.  Can we do remainder and divide at once
		 without a library call?  */
	      remainder = gen_reg_rtx (compute_mode);
	      if (! expand_twoval_binop ((unsignedp
					  ? udivmod_optab
					  : sdivmod_optab),
					 op0, op1,
					 NULL_RTX, remainder, unsignedp))
		remainder = 0;
	    }

	  if (remainder)
	    return gen_lowpart (mode, remainder);
	}

      /* Produce the quotient.  Try a quotient insn, but not a library call.
	 If we have a divmod in this mode, use it in preference to widening
	 the div (for this test we assume it will not fail).  Note that optab2
	 is set to the one of the two optabs that the call below will use.  */
      quotient
	= sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
			     op0, op1, rem_flag ? NULL_RTX : target,
			     unsignedp,
			     ((optab_handler (optab2, compute_mode)
			       != CODE_FOR_nothing)
			      ? OPTAB_DIRECT : OPTAB_WIDEN));

      if (quotient == 0)
	{
	  /* No luck there.  Try a quotient-and-remainder insn,
	     keeping the quotient alone.  */
	  quotient = gen_reg_rtx (compute_mode);
	  if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
				     op0, op1,
				     quotient, NULL_RTX, unsignedp))
	    {
	      quotient = 0;
	      /* Still no luck.  If we are not computing the remainder,
		 use a library call for the quotient.  */
	      if (! rem_flag)
		quotient = sign_expand_binop (compute_mode,
					      udiv_optab, sdiv_optab,
					      op0, op1, target,
					      unsignedp, OPTAB_LIB_WIDEN);
	    }
	}
    }

  if (rem_flag)
    {
      if (target && GET_MODE (target) != compute_mode)
	target = 0;

      if (quotient == 0)
	{
	  /* No divide instruction either.  Use library for remainder.  */
	  remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
					 op0, op1, target,
					 unsignedp, OPTAB_LIB_WIDEN);
	  /* No remainder function.  Try a quotient-and-remainder
	     function, keeping the remainder.  */
	  if (remainder == 0)
	    {
	      remainder = gen_reg_rtx (compute_mode);
	      if (!expand_twoval_binop_libfunc
		  (unsignedp ? udivmod_optab : sdivmod_optab,
		   op0, op1,
		   NULL_RTX, remainder,
		   unsignedp ? UMOD : MOD))
		remainder = NULL_RTX;
	    }
	}
      else
	{
	  /* We divided.  Now finish doing X - Y * (X / Y).  */
	  remainder = expand_mult (compute_mode, quotient, op1,
				   NULL_RTX, unsignedp);
	  remainder = expand_binop (compute_mode, sub_optab, op0,
				    remainder, target, unsignedp,
				    OPTAB_LIB_WIDEN);
	}
    }

  return gen_lowpart (mode, rem_flag ? remainder : quotient);
}
/* Return a tree node with data type TYPE, describing the value of X.
   Usually this is a VAR_DECL, if there is no obvious better choice.
   X may be an expression, however we only support those expressions
   generated by loop.c.  */

tree
make_tree (tree type, rtx x)
{
  tree t;

  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_WIDE_INT:
      t = wide_int_to_tree (type, std::make_pair (x, TYPE_MODE (type)));
      return t;

    case CONST_DOUBLE:
      STATIC_ASSERT (HOST_BITS_PER_WIDE_INT * 2 <= MAX_BITSIZE_MODE_ANY_INT);
      if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
	t = wide_int_to_tree (type,
			      wide_int::from_array (&CONST_DOUBLE_LOW (x), 2,
						    HOST_BITS_PER_WIDE_INT * 2));
      else
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, x);
	  t = build_real (type, d);
	}

      return t;

    case CONST_VECTOR:
      {
	int units = CONST_VECTOR_NUNITS (x);
	tree itype = TREE_TYPE (type);
	tree *elts;
	int i;

	/* Build a tree with vector elements.  */
	elts = XALLOCAVEC (tree, units);
	for (i = units - 1; i >= 0; --i)
	  {
	    rtx elt = CONST_VECTOR_ELT (x, i);
	    elts[i] = make_tree (itype, elt);
	  }

	return build_vector (type, elts);
      }

    case PLUS:
      return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1)));

    case MINUS:
      return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1)));

    case NEG:
      return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));

    case MULT:
      return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1)));

    case ASHIFT:
      return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1)));

    case LSHIFTRT:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
					 make_tree (t, XEXP (x, 0)),
					 make_tree (type, XEXP (x, 1))));

    case ASHIFTRT:
      t = signed_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
					 make_tree (t, XEXP (x, 0)),
					 make_tree (type, XEXP (x, 1))));

    case DIV:
      if (TREE_CODE (type) != REAL_TYPE)
	t = signed_type_for (type);
      else
	t = type;

      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
					 make_tree (t, XEXP (x, 0)),
					 make_tree (t, XEXP (x, 1))));
    case UDIV:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
					 make_tree (t, XEXP (x, 0)),
					 make_tree (t, XEXP (x, 1))));

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
					  GET_CODE (x) == ZERO_EXTEND);
      return fold_convert (type, make_tree (t, XEXP (x, 0)));

    case CONST:
      return make_tree (type, XEXP (x, 0));

    case SYMBOL_REF:
      t = SYMBOL_REF_DECL (x);
      if (t)
	return fold_convert (type, build_fold_addr_expr (t));
      /* else fall through.  */

    default:
      t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);

      /* If TYPE is a POINTER_TYPE, we might need to convert X from
	 address mode to pointer mode.  */
      if (POINTER_TYPE_P (type))
	x = convert_memory_address_addr_space
	      (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));

      /* Note that we do *not* use SET_DECL_RTL here, because we do not
	 want set_decl_rtl to go adjusting REG_ATTRS for this temporary.  */
      t->decl_with_rtl.rtl = x;

      return t;
    }
}
/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */

rtx
expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
{
  rtx tem = 0;

  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
    tem = simplify_binary_operation (AND, mode, op0, op1);
  if (tem == 0)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}
/* Helper function for emit_store_flag.  */

static rtx
emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
	     enum machine_mode mode, enum machine_mode compare_mode,
	     int unsignedp, rtx x, rtx y, int normalizep,
	     enum machine_mode target_mode)
{
  struct expand_operand ops[4];
  rtx op0, last, comparison, subtarget;
  enum machine_mode result_mode = targetm.cstore_mode (icode);

  last = get_last_insn ();
  x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
  y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
  if (!x || !y)
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  if (target_mode == VOIDmode)
    target_mode = result_mode;
  if (!target)
    target = gen_reg_rtx (target_mode);

  comparison = gen_rtx_fmt_ee (code, result_mode, x, y);

  create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], x);
  create_fixed_operand (&ops[3], y);
  if (!maybe_expand_insn (icode, 4, ops))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }
  subtarget = ops[0].value;

  /* If we are converting to a wider mode, first convert to
     TARGET_MODE, then normalize.  This produces better combining
     opportunities on machines that have a SIGN_EXTRACT when we are
     testing a single bit.  This mostly benefits the 68k.

     If STORE_FLAG_VALUE does not have the sign bit set when
     interpreted in MODE, we can do this conversion as unsigned, which
     is usually more efficient.  */
  if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
    {
      convert_move (target, subtarget,
		    val_signbit_known_clear_p (result_mode,
					       STORE_FLAG_VALUE));
      op0 = target;
      result_mode = target_mode;
    }
  else
    op0 = subtarget;

  /* If we want to keep subexpressions around, don't reuse our last
     target.  */
  if (optimize)
    subtarget = 0;

  /* Now normalize to the proper value in MODE.  Sometimes we don't
     have to do anything.  */
  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
    ;
  /* STORE_FLAG_VALUE might be the most negative number, so write
     the comparison this way to avoid a compiler-time warning.  */
  else if (- normalizep == STORE_FLAG_VALUE)
    op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);

  /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
     it hard to use a value of just the sign bit due to ANSI integer
     constant typing rules.  */
  else if (val_signbit_known_set_p (result_mode, STORE_FLAG_VALUE))
    op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
			GET_MODE_BITSIZE (result_mode) - 1, subtarget,
			normalizep == 1);
  else
    {
      gcc_assert (STORE_FLAG_VALUE & 1);

      op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
      if (normalizep == -1)
	op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
    }

  /* If we were converting to a smaller mode, do the conversion now.  */
  if (target_mode != result_mode)
    {
      convert_move (target, op0, 0);
      return target;
    }
  else
    return op0;
}
/* A subroutine of emit_store_flag only including "tricks" that do not
   need a recursive call.  These are kept separate to avoid infinite
   loops.  */

static rtx
emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
		   enum machine_mode mode, int unsignedp, int normalizep,
		   enum machine_mode target_mode)
{
  rtx subtarget;
  enum insn_code icode;
  enum machine_mode compare_mode;
  enum mode_class mclass;
  enum rtx_code scode;
  rtx tem;

  if (unsignedp)
    code = unsigned_condition (code);
  scode = swap_condition (code);

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }

  /* If we are comparing a double-word integer with zero or -1, we can
     convert the comparison into one involving a single word.  */
  if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
      && GET_MODE_CLASS (mode) == MODE_INT
      && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
    {
      if ((code == EQ || code == NE)
	  && (op1 == const0_rtx || op1 == constm1_rtx))
	{
	  rtx op00, op01;

	  /* Do a logical OR or AND of the two words and compare the
	     result.  */
	  op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
	  op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
	  tem = expand_binop (word_mode,
			      op1 == const0_rtx ? ior_optab : and_optab,
			      op00, op01, NULL_RTX, unsignedp,
			      OPTAB_DIRECT);

	  if (tem != 0)
	    tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
				   unsignedp, normalizep);
	}
      else if ((code == LT || code == GE) && op1 == const0_rtx)
	{
	  rtx op0h;

	  /* If testing the sign bit, can just test on high word.  */
	  op0h = simplify_gen_subreg (word_mode, op0, mode,
				      subreg_highpart_offset (word_mode,
							      mode));
	  tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
				 unsignedp, normalizep);
	}
      else
	tem = NULL_RTX;

      if (tem)
	{
	  if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
	    return tem;
	  if (!target)
	    target = gen_reg_rtx (target_mode);

	  convert_move (target, tem,
			!val_signbit_known_set_p (word_mode,
						  (normalizep ? normalizep
						   : STORE_FLAG_VALUE)));
	  return target;
	}
    }
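  /* Illustrative sketch (not part of GCC): with 32-bit words, the
     double-word comparisons reduced above become single-word tests
     (assuming <stdint.h>):

       (lo | hi) == 0      - the whole 64-bit value == 0
       (lo & hi) == ~0u    - the whole 64-bit value == -1
       (int32_t) hi < 0    - the 64-bit value < 0: sign bit only

     which is why only the high word is examined for LT/GE.  */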
  /* If this is A < 0 or A >= 0, we can do this by taking the ones
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
	  || val_signbit_p (mode, STORE_FLAG_VALUE)))
    {
      subtarget = target;

      if (!target)
	target_mode = mode;

      /* If the result is to be wider than OP0, it is best to convert it
	 first.  If it is to be narrower, it is *incorrect* to convert it
	 first.  */
      else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
	{
	  op0 = convert_modes (target_mode, mode, op0, 0);
	  mode = target_mode;
	}

      if (target_mode != mode)
	subtarget = 0;

      if (code == GE)
	op0 = expand_unop (mode, one_cmpl_optab, op0,
			   ((STORE_FLAG_VALUE == 1 || normalizep)
			    ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
	/* If we are supposed to produce a 0/1 value, we want to do
	   a logical shift from the sign bit to the low-order bit; for
	   a -1/0 value, we do an arithmetic shift.  */
	op0 = expand_shift (RSHIFT_EXPR, mode, op0,
			    GET_MODE_BITSIZE (mode) - 1,
			    subtarget, normalizep != -1);

      if (mode != target_mode)
	op0 = convert_modes (target_mode, mode, op0, 0);

      return op0;
    }
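  /* Illustrative sketch (not part of GCC): the shift sequences above,
     for 32-bit ints (assuming <stdint.h>):

       (uint32_t) a >> 31     - A < 0  as 0/1 (logical shift)
       a >> 31                - A < 0  as -1/0 (arithmetic shift)
       (uint32_t) ~a >> 31    - A >= 0 as 0/1 (complement first)

     i.e. the sign bit is moved to the low-order bit, with the shift
     kind chosen by NORMALIZEP.  */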
  mclass = GET_MODE_CLASS (mode);
  for (compare_mode = mode; compare_mode != VOIDmode;
       compare_mode = GET_MODE_WIDER_MODE (compare_mode))
    {
      enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
      icode = optab_handler (cstore_optab, optab_mode);
      if (icode != CODE_FOR_nothing)
	{
	  do_pending_stack_adjust ();
	  tem = emit_cstore (target, icode, code, mode, compare_mode,
			     unsignedp, op0, op1, normalizep, target_mode);
	  if (tem)
	    return tem;

	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	    {
	      tem = emit_cstore (target, icode, scode, mode, compare_mode,
				 unsignedp, op1, op0, normalizep, target_mode);
	      if (tem)
		return tem;
	    }
	  break;
	}
    }

  return 0;
}
/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  Normalize is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */

rtx
emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
		 enum machine_mode mode, int unsignedp, int normalizep)
{
  enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
  enum rtx_code rcode;
  rtx subtarget;
  rtx tem, last, trueval;

  /* If we compare constants, we shouldn't use a store-flag operation,
     but a constant load.  We can get there via the vanilla route that
     usually generates a compare-branch sequence, but will in this case
     fold the comparison to a constant, and thus elide the branch.  */
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    return NULL_RTX;

  tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
			   target_mode);
  if (tem)
    return tem;

  /* If we reached here, we can't do this with a scc insn, however there
     are some comparisons that can be done in other ways.  Don't do any
     of these cases if branches are very cheap.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
    return 0;
  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	normalizep = STORE_FLAG_VALUE;

      else if (val_signbit_p (mode, STORE_FLAG_VALUE))
	;
      else
	return 0;
    }

  last = get_last_insn ();

  /* If optimizing, use different pseudo registers for each insn, instead
     of reusing the same pseudo.  This leads to better CSE, but slows
     down the compiler, since there are more pseudos.  */
  subtarget = (!optimize
	       && (target_mode == mode)) ? target : NULL_RTX;
  trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);

  /* For floating-point comparisons, try the reverse comparison or try
     changing the "orderedness" of the comparison.  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      enum rtx_code first_code;
      bool and_them;

      rcode = reverse_condition_maybe_unordered (code);
      if (can_compare_p (rcode, mode, ccp_store_flag)
	  && (code == ORDERED || code == UNORDERED
	      || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
	      || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
	{
	  int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
			  || (STORE_FLAG_VALUE == -1 && normalizep == 1));

	  /* For the reverse comparison, use either an addition or a XOR.  */
	  if (want_add
	      && rtx_cost (GEN_INT (normalizep), PLUS, 1,
			   optimize_insn_for_speed_p ()) == 0)
	    {
	      tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       STORE_FLAG_VALUE, target_mode);
	      if (tem)
		return expand_binop (target_mode, add_optab, tem,
				     gen_int_mode (normalizep, target_mode),
				     target, 0, OPTAB_WIDEN);
	    }
	  else if (!want_add
		   && rtx_cost (trueval, XOR, 1,
				optimize_insn_for_speed_p ()) == 0)
	    {
	      tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       normalizep, target_mode);
	      if (tem)
		return expand_binop (target_mode, xor_optab, tem, trueval,
				     target, INTVAL (trueval) >= 0, OPTAB_WIDEN);
	    }
	}

      delete_insns_since (last);

      /* Cannot split ORDERED and UNORDERED, only try the above trick.  */
      if (code == ORDERED || code == UNORDERED)
	return 0;

      and_them = split_comparison (code, mode, &first_code, &code);

      /* If there are no NaNs, the first comparison should always fall through.
	 Effectively change the comparison to the other one.  */
      if (!HONOR_NANS (mode))
	{
	  gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
	  return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
				    target_mode);
	}

#ifdef HAVE_conditional_move
      /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
	 conditional move.  */
      tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
			       normalizep, target_mode);
      if (tem == 0)
	return 0;

      if (and_them)
	tem = emit_conditional_move (target, code, op0, op1, mode,
				     tem, const0_rtx, GET_MODE (tem), 0);
      else
	tem = emit_conditional_move (target, code, op0, op1, mode,
				     trueval, tem, GET_MODE (tem), 0);

      if (tem == 0)
	delete_insns_since (last);
      return tem;
#else
      return 0;
#endif
    }
  /* The remaining tricks only apply to integer comparisons.  */

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return 0;

  /* If this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if ((code == EQ || code == NE) && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
			  OPTAB_WIDEN);

      if (tem == 0)
	tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
			    OPTAB_WIDEN);
      if (tem != 0)
	tem = emit_store_flag (target, code, tem, const0_rtx,
			       mode, unsignedp, normalizep);
      if (tem != 0)
	return tem;

      delete_insns_since (last);
    }

  /* For integer comparisons, try the reverse comparison.  However, for
     small X and if we'd have anyway to extend, implementing "X != 0"
     as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0".  */
  rcode = reverse_condition (code);
  if (can_compare_p (rcode, mode, ccp_store_flag)
      && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
	    && code == NE
	    && GET_MODE_SIZE (mode) < UNITS_PER_WORD
	    && op1 == const0_rtx))
    {
      int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
		      || (STORE_FLAG_VALUE == -1 && normalizep == 1));

      /* Again, for the reverse comparison, use either an addition or a XOR.  */
      if (want_add
	  && rtx_cost (GEN_INT (normalizep), PLUS, 1,
		       optimize_insn_for_speed_p ()) == 0)
	{
	  tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				   STORE_FLAG_VALUE, target_mode);
	  if (tem != 0)
	    tem = expand_binop (target_mode, add_optab, tem,
				gen_int_mode (normalizep, target_mode),
				target, 0, OPTAB_WIDEN);
	}
      else if (!want_add
	       && rtx_cost (trueval, XOR, 1,
			    optimize_insn_for_speed_p ()) == 0)
	{
	  tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				   normalizep, target_mode);
	  if (tem != 0)
	    tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
				INTVAL (trueval) >= 0, OPTAB_WIDEN);
	}

      if (tem != 0)
	return tem;
      delete_insns_since (last);
    }
  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (op1 != const0_rtx
      || (code != EQ && code != NE
	  && (BRANCH_COST (optimize_insn_for_speed_p (),
			   false) <= 1 || (code != LE && code != GT))))
    return 0;

  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
			  OPTAB_WIDEN);
      if (tem)
	tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
			    OPTAB_WIDEN);
    }

  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
			  GET_MODE_BITSIZE (mode) - 1,
			  subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
			  OPTAB_WIDEN);
    }
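  /* Illustrative sketch (not part of GCC) of the two sign-bit encodings
     above, for 32-bit ints (assuming <stdint.h>; the A - 1 step is done
     unsigned so INT32_MIN stays well defined):

       a <= 0  iff  ((uint32_t) a | ((uint32_t) a - 1)) >> 31
       a > 0   iff  (uint32_t) ((a >> 31) - a) >> 31

     both leave the answer in the sign bit, ready for the normalizing
     shift at the end of this function.  */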
  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
	 that converts the operand into a positive number if it is nonzero
	 or zero if it was originally zero.  Then, for EQ, we subtract 1 and
	 for NE we negate.  This puts the result in the sign bit.  Then we
	 normalize with a shift, if needed.

	 Two operations that can do the above actions are ABS and FFS, so try
	 them.  If that doesn't work, and MODE is smaller than a full word,
	 we can use zero-extension to the wider mode (an unsigned conversion)
	 as the operation.  */

      /* Note that ABS doesn't yield a positive number for INT_MIN, but
	 that is compensated by the subsequent overflow when subtracting
	 one / negating.  */

      if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
	tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
	tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
	{
	  tem = convert_modes (word_mode, mode, op0, 1);
	  mode = word_mode;
	}

      if (tem != 0)
	{
	  if (code == EQ)
	    tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
				0, OPTAB_WIDEN);
	  else
	    tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
	}

      /* If we couldn't do it that way, for NE we can "or" the two's complement
	 of the value with itself.  For EQ, we take the one's complement of
	 that "or", which is an extra insn, so we only handle EQ if branches
	 are expensive.  */

      if (tem == 0
	  && (code == NE
	      || BRANCH_COST (optimize_insn_for_speed_p (),
			      false) > 1))
	{
	  if (rtx_equal_p (subtarget, op0))
	    subtarget = 0;

	  tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
	  tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
			      OPTAB_WIDEN);

	  if (tem && code == EQ)
	    tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
	}
    }
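  /* Illustrative sketch (not part of GCC): the neg/ior fallback in
     plain C, for 32-bit ints (assuming <stdint.h>):

       uint32_t u = (uint32_t) x;
       int ne = (u | (0u - u)) >> 31;    - x != 0: sign bit of x | -x
       int eq = ~(u | (0u - u)) >> 31;   - x == 0: one's complement

     x | -x is negative exactly when x is nonzero.  */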
  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
			GET_MODE_BITSIZE (mode) - 1,
			subtarget, normalizep == 1);

  if (tem)
    {
      if (!target)
	;
      else if (GET_MODE (tem) != target_mode)
	{
	  convert_move (target, tem, 0);
	  tem = target;
	}
      else if (!subtarget)
	{
	  emit_move_insn (target, tem);
	  tem = target;
	}
    }
  else
    delete_insns_since (last);

  return tem;
}
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       enum machine_mode mode, int unsignedp, int normalizep)
{
  rtx tem, label;
  rtx trueval, falseval;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (!target)
    target = gen_reg_rtx (word_mode);

  /* If this failed, we have to do this with set/compare/jump/set code.
     For foo != 0, if foo is in OP0, just replace it with 1 if nonzero.  */
  trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
  if (code == NE
      && GET_MODE_CLASS (mode) == MODE_INT
      && REG_P (target)
      && op0 == target
      && op1 == const0_rtx)
    {
      label = gen_label_rtx ();
      do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
			       mode, NULL_RTX, NULL_RTX, label, -1);
      emit_move_insn (target, trueval);
      emit_label (label);
      return target;
    }

  if (!REG_P (target)
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  /* Jump in the right direction if the target cannot implement CODE
     but can jump on its reverse condition.  */
  falseval = const0_rtx;
  if (! can_compare_p (code, mode, ccp_jump)
      && (! FLOAT_MODE_P (mode)
	  || code == ORDERED || code == UNORDERED
	  || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
	  || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
    {
      enum rtx_code rcode;
      if (FLOAT_MODE_P (mode))
	rcode = reverse_condition_maybe_unordered (code);
      else
	rcode = reverse_condition (code);

      /* Canonicalize to UNORDERED for the libcall.  */
      if (can_compare_p (rcode, mode, ccp_jump)
	  || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
	{
	  falseval = trueval;
	  trueval = const0_rtx;
	  code = rcode;
	}
    }

  emit_move_insn (target, trueval);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
			   NULL_RTX, label, -1);

  emit_move_insn (target, falseval);
  emit_label (label);

  return target;
}
/* Perform possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE.  This is
   now a thin wrapper around do_compare_rtx_and_jump.  */

static void
do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
		 rtx label)
{
  int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
  do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
			   NULL_RTX, NULL_RTX, label, -1);
}