/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "insn-config.h"
#include "langhooks.h"
struct target_expmed default_target_expmed;
struct target_expmed *this_target_expmed = &default_target_expmed;
static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int, bool);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) \
  (((x) & ((x) - (unsigned HOST_WIDE_INT) 1)) == 0)
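
/* An illustrative note (not from the original sources): for x == 8
   (binary 1000), x - 1 == 7 (binary 0111) and 8 & 7 == 0, so the test
   succeeds; for x == 6 (binary 0110), 6 & 5 == 4 != 0, so it fails.
   x == 0 also passes, hence the "OR_ZERO" in the name.  */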
struct init_expmed_rtl
{
  struct rtx_def reg;
  struct rtx_def plus;
  struct rtx_def neg;
  struct rtx_def mult;
  struct rtx_def sdiv;
  struct rtx_def udiv;
  struct rtx_def sdiv_32;
  struct rtx_def smod_32;
  struct rtx_def wide_mult;
  struct rtx_def wide_lshr;
  struct rtx_def wide_trunc;
  struct rtx_def shift;
  struct rtx_def shift_mult;
  struct rtx_def shift_add;
  struct rtx_def shift_sub0;
  struct rtx_def shift_sub1;
  struct rtx_def zext;
  struct rtx_def trunc;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
};
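
/* An illustrative note (not from the original sources): once init_expmed
   has run, pow2[m] caches the CONST_INT rtx for 1 << m and cint[m] the
   CONST_INT rtx for m itself; e.g. pow2[3] is (const_int 8) and cint[3]
   is (const_int 3).  These feed the per-shift-count cost probes below.  */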
static void
init_expmed_one_conv (struct init_expmed_rtl *all, enum machine_mode to_mode,
                      enum machine_mode from_mode, bool speed)
{
  int to_size, from_size;
  rtx which;

  /* We're given no information about the true size of a partial integer,
     only the size of the "full" integer it requires for storage.  For
     comparison purposes here, reduce the bit size by one in that case.  */
  to_size = (GET_MODE_BITSIZE (to_mode)
             - (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT));
  from_size = (GET_MODE_BITSIZE (from_mode)
               - (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT));

  /* Assume cost of zero-extend and sign-extend is the same.  */
  which = (to_size < from_size ? &all->trunc : &all->zext);

  PUT_MODE (&all->reg, from_mode);
  set_convert_cost (to_mode, from_mode, speed, set_src_cost (which, speed));
}
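
/* For example (illustrative, not from the original sources): on a target
   with 32-bit SImode and 64-bit DImode, the SImode -> DImode conversion
   cost is probed with the zero_extend rtx, while DImode -> SImode has
   to_size < from_size and is probed with the truncate rtx instead.  */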
static void
init_expmed_one_mode (struct init_expmed_rtl *all,
                      enum machine_mode mode, int speed)
{
  int m, n, mode_bitsize;
  enum machine_mode mode_from;

  mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);

  PUT_MODE (&all->reg, mode);
  PUT_MODE (&all->plus, mode);
  PUT_MODE (&all->neg, mode);
  PUT_MODE (&all->mult, mode);
  PUT_MODE (&all->sdiv, mode);
  PUT_MODE (&all->udiv, mode);
  PUT_MODE (&all->sdiv_32, mode);
  PUT_MODE (&all->smod_32, mode);
  PUT_MODE (&all->wide_trunc, mode);
  PUT_MODE (&all->shift, mode);
  PUT_MODE (&all->shift_mult, mode);
  PUT_MODE (&all->shift_add, mode);
  PUT_MODE (&all->shift_sub0, mode);
  PUT_MODE (&all->shift_sub1, mode);
  PUT_MODE (&all->zext, mode);
  PUT_MODE (&all->trunc, mode);

  set_add_cost (speed, mode, set_src_cost (&all->plus, speed));
  set_neg_cost (speed, mode, set_src_cost (&all->neg, speed));
  set_mul_cost (speed, mode, set_src_cost (&all->mult, speed));
  set_sdiv_cost (speed, mode, set_src_cost (&all->sdiv, speed));
  set_udiv_cost (speed, mode, set_src_cost (&all->udiv, speed));

  set_sdiv_pow2_cheap (speed, mode, (set_src_cost (&all->sdiv_32, speed)
                                     <= 2 * add_cost (speed, mode)));
  set_smod_pow2_cheap (speed, mode, (set_src_cost (&all->smod_32, speed)
                                     <= 4 * add_cost (speed, mode)));

  set_shift_cost (speed, mode, 0, 0);
  {
    int cost = add_cost (speed, mode);
    set_shiftadd_cost (speed, mode, 0, cost);
    set_shiftsub0_cost (speed, mode, 0, cost);
    set_shiftsub1_cost (speed, mode, 0, cost);
  }

  n = MIN (MAX_BITS_PER_WORD, mode_bitsize);
  for (m = 1; m < n; m++)
    {
      XEXP (&all->shift, 1) = all->cint[m];
      XEXP (&all->shift_mult, 1) = all->pow2[m];

      set_shift_cost (speed, mode, m, set_src_cost (&all->shift, speed));
      set_shiftadd_cost (speed, mode, m, set_src_cost (&all->shift_add, speed));
      set_shiftsub0_cost (speed, mode, m, set_src_cost (&all->shift_sub0, speed));
      set_shiftsub1_cost (speed, mode, m, set_src_cost (&all->shift_sub1, speed));
    }

  if (SCALAR_INT_MODE_P (mode))
    {
      for (mode_from = MIN_MODE_INT; mode_from <= MAX_MODE_INT;
           mode_from = (enum machine_mode)(mode_from + 1))
        init_expmed_one_conv (all, mode, mode_from, speed);
    }
  if (GET_MODE_CLASS (mode) == MODE_INT)
    {
      enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          PUT_MODE (&all->zext, wider_mode);
          PUT_MODE (&all->wide_mult, wider_mode);
          PUT_MODE (&all->wide_lshr, wider_mode);
          XEXP (&all->wide_lshr, 1) = GEN_INT (mode_bitsize);

          set_mul_widen_cost (speed, wider_mode,
                              set_src_cost (&all->wide_mult, speed));
          set_mul_highpart_cost (speed, mode,
                                 set_src_cost (&all->wide_trunc, speed));
        }
    }
}
void
init_expmed (void)
{
  struct init_expmed_rtl all;
  enum machine_mode mode;
  int m, speed;

  memset (&all, 0, sizeof all);
  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      all.pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      all.cint[m] = GEN_INT (m);
    }

  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.sdiv, DIV);
  XEXP (&all.sdiv, 0) = &all.reg;
  XEXP (&all.sdiv, 1) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.sdiv_32, DIV);
  XEXP (&all.sdiv_32, 0) = &all.reg;
  XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? all.cint[32] : GEN_INT (32);

  PUT_CODE (&all.smod_32, MOD);
  XEXP (&all.smod_32, 0) = &all.reg;
  XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub0, MINUS);
  XEXP (&all.shift_sub0, 0) = &all.shift_mult;
  XEXP (&all.shift_sub0, 1) = &all.reg;

  PUT_CODE (&all.shift_sub1, MINUS);
  XEXP (&all.shift_sub1, 0) = &all.reg;
  XEXP (&all.shift_sub1, 1) = &all.shift_mult;

  PUT_CODE (&all.trunc, TRUNCATE);
  XEXP (&all.trunc, 0) = &all.reg;

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      set_zero_cost (speed, set_src_cost (const0_rtx, speed));

      for (mode = MIN_MODE_INT; mode <= MAX_MODE_INT;
           mode = (enum machine_mode)(mode + 1))
        init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_PARTIAL_INT != VOIDmode)
        for (mode = MIN_MODE_PARTIAL_INT; mode <= MAX_MODE_PARTIAL_INT;
             mode = (enum machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_VECTOR_INT != VOIDmode)
        for (mode = MIN_MODE_VECTOR_INT; mode <= MAX_MODE_VECTOR_INT;
             mode = (enum machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);
    }

  if (alg_hash_used_p ())
    {
      struct alg_hash_entry *p = alg_hash_entry_ptr (0);
      memset (p, 0, sizeof (*p) * NUM_ALG_HASH_ENTRIES);
    }
  else
    set_alg_hash_used_p (true);
  default_rtl_profile ();
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Adjust bitfield memory MEM so that it points to the first unit of mode
   MODE that contains a bitfield of size BITSIZE at bit position BITNUM.
   If MODE is BLKmode, return a reference to every byte in the bitfield.
   Set *NEW_BITNUM to the bit position of the field within the new memory.  */

static rtx
narrow_bit_field_mem (rtx mem, enum machine_mode mode,
                      unsigned HOST_WIDE_INT bitsize,
                      unsigned HOST_WIDE_INT bitnum,
                      unsigned HOST_WIDE_INT *new_bitnum)
{
  if (mode == BLKmode)
    {
      *new_bitnum = bitnum % BITS_PER_UNIT;
      HOST_WIDE_INT offset = bitnum / BITS_PER_UNIT;
      HOST_WIDE_INT size = ((*new_bitnum + bitsize + BITS_PER_UNIT - 1)
                            / BITS_PER_UNIT);
      return adjust_bitfield_address_size (mem, mode, offset, size);
    }
  else
    {
      unsigned int unit = GET_MODE_BITSIZE (mode);
      *new_bitnum = bitnum % unit;
      HOST_WIDE_INT offset = (bitnum - *new_bitnum) / BITS_PER_UNIT;
      return adjust_bitfield_address (mem, mode, offset);
    }
}
/* The caller wants to perform insertion or extraction PATTERN on a
   bitfield of size BITSIZE at BITNUM bits into memory operand OP0.
   BITREGION_START and BITREGION_END are as for store_bit_field
   and FIELDMODE is the natural mode of the field.

   Search for a mode that is compatible with the memory access
   restrictions and (where applicable) with a register insertion or
   extraction.  Return the new memory on success, storing the adjusted
   bit position in *NEW_BITNUM.  Return null otherwise.  */

static rtx
adjust_bit_field_mem_for_reg (enum extraction_pattern pattern,
                              rtx op0, HOST_WIDE_INT bitsize,
                              HOST_WIDE_INT bitnum,
                              unsigned HOST_WIDE_INT bitregion_start,
                              unsigned HOST_WIDE_INT bitregion_end,
                              enum machine_mode fieldmode,
                              unsigned HOST_WIDE_INT *new_bitnum)
{
  bit_field_mode_iterator iter (bitsize, bitnum, bitregion_start,
                                bitregion_end, MEM_ALIGN (op0),
                                MEM_VOLATILE_P (op0));
  enum machine_mode best_mode;
  if (iter.next_mode (&best_mode))
    {
      /* We can use a memory in BEST_MODE.  See whether this is true for
         any wider modes.  All other things being equal, we prefer to
         use the widest mode possible because it tends to expose more
         CSE opportunities.  */
      if (!iter.prefer_smaller_modes ())
        {
          /* Limit the search to the mode required by the corresponding
             register insertion or extraction instruction, if any.  */
          enum machine_mode limit_mode = word_mode;
          extraction_insn insn;
          if (get_best_reg_extraction_insn (&insn, pattern,
                                            GET_MODE_BITSIZE (best_mode),
                                            fieldmode))
            limit_mode = insn.field_mode;

          enum machine_mode wider_mode;
          while (iter.next_mode (&wider_mode)
                 && GET_MODE_SIZE (wider_mode) <= GET_MODE_SIZE (limit_mode))
            best_mode = wider_mode;
        }
      return narrow_bit_field_mem (op0, best_mode, bitsize, bitnum,
                                   new_bitnum);
    }
  return NULL_RTX;
}
/* Return true if a bitfield of size BITSIZE at bit number BITNUM within
   a structure of mode STRUCT_MODE represents a lowpart subreg.  The subreg
   offset is then BITNUM / BITS_PER_UNIT.  */

static bool
lowpart_bit_field_p (unsigned HOST_WIDE_INT bitnum,
                     unsigned HOST_WIDE_INT bitsize,
                     enum machine_mode struct_mode)
{
  if (BYTES_BIG_ENDIAN)
    return (bitnum % BITS_PER_UNIT == 0
            && (bitnum + bitsize == GET_MODE_BITSIZE (struct_mode)
                || (bitnum + bitsize) % BITS_PER_WORD == 0));
  else
    return bitnum % BITS_PER_WORD == 0;
}
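
/* Worked example (illustrative, not from the original sources): on a
   32-bit big-endian target, an 8-bit field at bitnum 24 of an SImode
   structure has bitnum + bitsize == 32 == GET_MODE_BITSIZE (SImode),
   so it is a lowpart; on a little-endian target the lowpart is instead
   the field starting at bitnum 0.  */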
/* Return true if OP is a memory and if a bitfield of size BITSIZE at
   bit number BITNUM can be treated as a simple value of mode MODE.  */

static bool
simple_mem_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitnum, enum machine_mode mode)
{
  return (MEM_P (op0)
          && bitnum % BITS_PER_UNIT == 0
          && bitsize == GET_MODE_BITSIZE (mode)
          && (!SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
              || (bitnum % GET_MODE_ALIGNMENT (mode) == 0
                  && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode))));
}
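
/* For instance (illustrative, not from the original sources): a 16-bit
   field on a byte boundary can be read or written as a plain HImode
   access provided the target either tolerates unaligned accesses or the
   MEM is at least HImode-aligned; otherwise the shift-and-mask paths
   below are used.  */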
/* Try to use instruction INSV to store VALUE into a field of OP0.
   BITSIZE and BITNUM are as for store_bit_field.  */

static bool
store_bit_field_using_insv (const extraction_insn *insv, rtx op0,
                            unsigned HOST_WIDE_INT bitsize,
                            unsigned HOST_WIDE_INT bitnum, rtx value)
{
  struct expand_operand ops[4];
  rtx value1;
  rtx xop0 = op0;
  rtx last = get_last_insn ();
  bool copy_back = false;

  enum machine_mode op_mode = insv->field_mode;
  unsigned int unit = GET_MODE_BITSIZE (op_mode);
  if (bitsize == 0 || bitsize > unit)
    return false;

  if (MEM_P (xop0))
    /* Get a reference to the first byte of the field.  */
    xop0 = narrow_bit_field_mem (xop0, insv->struct_mode, bitsize, bitnum,
                                 &bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in OP_MODE.  */
      if (BYTES_BIG_ENDIAN)
        bitnum += unit - GET_MODE_BITSIZE (GET_MODE (op0));

      /* If xop0 is a register, we need it in OP_MODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
        xop0 = gen_lowpart_SUBREG (op_mode, xop0);
    }

  /* If the destination is a paradoxical subreg such that we need a
     truncate to the inner mode, perform the insertion on a temporary and
     truncate the result to the original destination.  Note that we can't
     just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
     X) 0)) is (reg:N X).  */
  if (GET_CODE (xop0) == SUBREG
      && REG_P (SUBREG_REG (xop0))
      && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (SUBREG_REG (xop0)),
                                         op_mode))
    {
      rtx tem = gen_reg_rtx (op_mode);
      emit_move_insn (tem, xop0);
      xop0 = tem;
      copy_back = true;
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are inserting into.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;

  /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
  value1 = value;
  if (GET_MODE (value) != op_mode)
    {
      if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
        {
          /* Optimization: Don't bother really extending VALUE
             if it has all the bits we will actually use.  However,
             if we must narrow it, be sure we do it correctly.  */

          if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
            {
              rtx tmp;

              tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
              if (! tmp)
                tmp = simplify_gen_subreg (op_mode,
                                           force_reg (GET_MODE (value),
                                                      value1),
                                           GET_MODE (value), 0);
              value1 = tmp;
            }
          else
            value1 = gen_lowpart (op_mode, value1);
        }
      else if (CONST_INT_P (value))
        value1 = gen_int_mode (INTVAL (value), op_mode);
      else
        /* Parse phase is supposed to make VALUE's data type
           match that of the component reference, which is a type
           at least as wide as the field; so VALUE should have
           a mode that corresponds to that type.  */
        gcc_assert (CONSTANT_P (value));
    }

  create_fixed_operand (&ops[0], xop0);
  create_integer_operand (&ops[1], bitsize);
  create_integer_operand (&ops[2], bitnum);
  create_input_operand (&ops[3], value1, op_mode);
  if (maybe_expand_insn (insv->icode, 4, ops))
    {
      if (copy_back)
        convert_move (op0, xop0, true);
      return true;
    }
  delete_insns_since (last);
  return false;
}
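
/* An illustrative note on the bit-numbering flip above (not from the
   original sources): with unit == 32, bitsize == 8 and bitnum == 4, a
   machine whose insv pattern counts bit positions from the other end
   wants 32 - 8 - 4 == 20 as the position of the same field.  */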
/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum,
                   unsigned HOST_WIDE_INT bitregion_start,
                   unsigned HOST_WIDE_INT bitregion_end,
                   enum machine_mode fieldmode,
                   rtx value, bool fallback_p)
{
  rtx op0 = str_rtx;
  rtx orig_value;

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
      int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
      int byte_offset = 0;

      /* Paradoxical subregs need special handling on big endian machines.  */
      if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
        {
          int difference = inner_mode_size - outer_mode_size;

          if (WORDS_BIG_ENDIAN)
            byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            byte_offset += difference % UNITS_PER_WORD;
        }
      else
        byte_offset = SUBREG_BYTE (op0);

      bitnum += byte_offset * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return true;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      struct expand_operand ops[3];
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      enum insn_code icode = optab_handler (vec_set_optab, outermode);
      int pos = bitnum / GET_MODE_BITSIZE (innermode);

      create_fixed_operand (&ops[0], op0);
      create_input_operand (&ops[1], value, innermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
        return true;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.  */
  if (!MEM_P (op0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && ((bitsize == GET_MODE_BITSIZE (GET_MODE (op0)) && bitnum == 0)
          || (bitsize % BITS_PER_WORD == 0 && bitnum % BITS_PER_WORD == 0)))
    {
      /* Use the subreg machinery either to narrow OP0 to the required
         words or to cope with mode punning between equal-sized modes.  */
      rtx sub = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
                                     bitnum / BITS_PER_UNIT);
      if (sub)
        {
          emit_move_insn (sub, value);
          return true;
        }
    }

  /* If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */
  if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode))
    {
      op0 = adjust_bitfield_address (op0, fieldmode, bitnum / BITS_PER_UNIT);
      emit_move_insn (op0, value);
      return true;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_bitfield_address_size (op0, imode, 0, MEM_SIZE (op0));
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);
          }
      }
  }

  /* Storing an lsb-aligned field in a register
     can be done with a movstrict instruction.  */

  if (!MEM_P (op0)
      && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
      rtx arg0 = op0;
      unsigned HOST_WIDE_INT subreg_off;

      if (GET_CODE (arg0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          arg0 = SUBREG_REG (arg0);
        }

      subreg_off = bitnum / BITS_PER_UNIT;
      if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
        {
          arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);

          create_fixed_operand (&ops[0], arg0);
          /* Shrink the source operand to FIELDMODE.  */
          create_convert_operand_to (&ops[1], value, fieldmode, false);
          if (maybe_expand_insn (icode, 2, ops))
            return true;
        }
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force
         is not allowed.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards
                                  ? GET_MODE_SIZE (fieldmode) / UNITS_PER_WORD
                                  - i - 1
                                  : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx value_word = operand_subword_force (value, wordnum, fieldmode);
          unsigned HOST_WIDE_INT new_bitsize =
            MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD);

          /* If the remaining chunk doesn't have full wordsize we have
             to make sure that for big endian machines the higher order
             bits are used.  */
          if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards)
            value_word = simplify_expand_binop (word_mode, lshr_optab,
                                                value_word,
                                                GEN_INT (BITS_PER_WORD
                                                         - new_bitsize),
                                                NULL_RTX, true,
                                                OPTAB_LIB_WIDEN);

          if (!store_bit_field_1 (op0, new_bitsize,
                                  bitnum + bit_offset,
                                  bitregion_start, bitregion_end,
                                  word_mode,
                                  value_word, fallback_p))
            {
              delete_insns_since (last);
              return false;
            }
        }
      return true;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */

  orig_value = value;
  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* If OP0 is a multi-word register, narrow it to the affected word.
     If the region spans two words, defer to store_split_bit_field.  */
  if (!MEM_P (op0) && GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
    {
      op0 = simplify_gen_subreg (word_mode, op0, GET_MODE (op0),
                                 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
      bitnum %= BITS_PER_WORD;
      if (bitnum + bitsize > BITS_PER_WORD)
        {
          if (!fallback_p)
            return false;

          store_split_bit_field (op0, bitsize, bitnum, bitregion_start,
                                 bitregion_end, value);
          return true;
        }
    }

  /* From here on we can assume that the field to be stored in fits
     within a word.  If the destination is a register, it too fits
     in a word.  */

  extraction_insn insv;
  if (!MEM_P (op0)
      && get_best_reg_extraction_insn (&insv, EP_insv,
                                       GET_MODE_BITSIZE (GET_MODE (op0)),
                                       fieldmode)
      && store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
    return true;

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (MEM_P (op0))
    {
      /* Do not use unaligned memory insvs for volatile bitfields when
         -fstrict-volatile-bitfields is in effect.  */
      if (!(MEM_VOLATILE_P (op0)
            && flag_strict_volatile_bitfields > 0)
          && get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
                                           fieldmode)
          && store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
        return true;

      rtx last = get_last_insn ();

      /* Try loading part of OP0 into a register, inserting the bitfield
         into that, and then copying the result back to OP0.  */
      unsigned HOST_WIDE_INT bitpos;
      rtx xop0 = adjust_bit_field_mem_for_reg (EP_insv, op0, bitsize, bitnum,
                                               bitregion_start, bitregion_end,
                                               fieldmode, &bitpos);
      if (xop0)
        {
          rtx tempreg = copy_to_reg (xop0);
          if (store_bit_field_1 (tempreg, bitsize, bitpos,
                                 bitregion_start, bitregion_end,
                                 fieldmode, orig_value, false))
            {
              emit_move_insn (xop0, tempreg);
              return true;
            }
          delete_insns_since (last);
        }
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, bitsize, bitnum, bitregion_start,
                         bitregion_end, value);
  return true;
}
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.

   BITREGION_START is bitpos of the first bitfield in this region.
   BITREGION_END is the bitpos of the ending bitfield in this region.
   These two fields are 0, if the C++ memory model does not apply,
   or we are not interested in keeping track of bitfield regions.

   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.  */

void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum,
                 unsigned HOST_WIDE_INT bitregion_start,
                 unsigned HOST_WIDE_INT bitregion_end,
                 enum machine_mode fieldmode,
                 rtx value)
{
  /* Under the C++0x memory model, we must not touch bits outside the
     bit region.  Adjust the address to start at the beginning of the
     bit region.  */
  if (MEM_P (str_rtx) && bitregion_start > 0)
    {
      enum machine_mode bestmode;
      HOST_WIDE_INT offset, size;

      gcc_assert ((bitregion_start % BITS_PER_UNIT) == 0);

      offset = bitregion_start / BITS_PER_UNIT;
      bitnum -= bitregion_start;
      size = (bitnum + bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
      bitregion_end -= bitregion_start;
      bitregion_start = 0;
      bestmode = get_best_mode (bitsize, bitnum,
                                bitregion_start, bitregion_end,
                                MEM_ALIGN (str_rtx), VOIDmode,
                                MEM_VOLATILE_P (str_rtx));
      str_rtx = adjust_bitfield_address_size (str_rtx, bestmode, offset, size);
    }

  if (!store_bit_field_1 (str_rtx, bitsize, bitnum,
                          bitregion_start, bitregion_end,
                          fieldmode, value, true))
    gcc_unreachable ();
}
/* Use shifts and boolean operations to store VALUE into a bit field of
   width BITSIZE in OP0, starting at bit BITNUM.  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitnum,
                       unsigned HOST_WIDE_INT bitregion_start,
                       unsigned HOST_WIDE_INT bitregion_end,
                       rtx value)
{
  enum machine_mode mode;
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (MEM_P (op0))
    {
      unsigned HOST_WIDE_INT maxbits = MAX_FIXED_MODE_SIZE;

      if (bitregion_end)
        maxbits = bitregion_end - bitregion_start + 1;

      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;

      if (MEM_VOLATILE_P (op0)
          && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
          && GET_MODE_BITSIZE (GET_MODE (op0)) <= maxbits
          && flag_strict_volatile_bitfields > 0)
        mode = GET_MODE (op0);
      else
        mode = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
                              MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitnum, bitregion_start,
                                 bitregion_end, value);
          return;
        }

      op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
    }

  mode = GET_MODE (op0);
  gcc_assert (SCALAR_INT_MODE_P (mode));

  /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
     for invalid input, such as f5 from gcc.dg/pr48335-2.c.  */

  if (BYTES_BIG_ENDIAN)
    /* BITNUM is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;

  /* Now BITNUM is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITNUM bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (CONST_INT_P (value))
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitnum, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitnum + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        value = convert_to_mode (mode, value, 1);

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitnum > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              bitnum, NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, temp,
                           mask_rtx (mode, bitnum, bitsize, 1),
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    {
      op0 = copy_rtx (op0);
      emit_move_insn (op0, temp);
    }
}
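
/* A minimal sketch of the read-modify-write sequence expanded above,
   written in plain C for a little-endian word (assumed names, and
   assuming 0 < bitsize < 32 so the shifts are well defined; this is an
   illustration, not part of GCC):

     unsigned int
     store_field_sketch (unsigned int word, unsigned int value,
                         int bitnum, int bitsize)
     {
       unsigned int mask = ((1u << bitsize) - 1) << bitnum;
       value = (value << bitnum) & mask;   // shift VALUE, trim excess
       return (word & ~mask) | value;      // clear field, then OR in
     }

   The rtl expansion performs the same three steps with expand_binop and
   expand_shift, keeping the intermediates in registers so that CSE can
   combine consecutive bitfield assignments.  */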
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos,
                       unsigned HOST_WIDE_INT bitregion_start,
                       unsigned HOST_WIDE_INT bitregion_end,
                       rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && !CONST_INT_P (value))
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* When region of bytes we can touch is restricted, decrease
         UNIT close to the end of the region as needed.  If op0 is a REG
         or SUBREG of REG, don't do this, as there can't be data races
         on a register and we can expand shorter code in some cases.  */
      if (bitregion_end
          && unit > BITS_PER_UNIT
          && bitpos + bitsdone - thispos + unit > bitregion_end + 1
          && !REG_P (op0)
          && (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0))))
        {
          unit = unit / 2;
          continue;
        }

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          /* Fetch successively less significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            {
              int total_bits = GET_MODE_BITSIZE (GET_MODE (value));
              /* The args are chosen so that the last part includes the
                 lsb.  Give extract_bit_field the value it needs (with
                 endianness compensation) to fetch the piece we want.  */
              part = extract_fixed_bit_field (word_mode, value, thissize,
                                              total_bits - bitsize + bitsdone,
                                              NULL_RTX, 1, false);
            }
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, thissize,
                                            bitsdone, NULL_RTX, 1, false);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD)
                            + (offset * unit / BITS_PER_WORD);
          enum machine_mode sub_mode = GET_MODE (SUBREG_REG (op0));
          if (sub_mode != BLKmode && GET_MODE_SIZE (sub_mode) < UNITS_PER_WORD)
            word = word_offset ? const0_rtx : op0;
          else
            word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                          GET_MODE (SUBREG_REG (op0)));
          offset &= BITS_PER_WORD / unit - 1;
        }
      else if (REG_P (op0))
        {
          enum machine_mode op0_mode = GET_MODE (op0);
          if (op0_mode != BLKmode && GET_MODE_SIZE (op0_mode) < UNITS_PER_WORD)
            word = offset ? const0_rtx : op0;
          else
            word = operand_subword_force (op0, offset * unit / BITS_PER_WORD,
                                          GET_MODE (op0));
          offset &= BITS_PER_WORD / unit - 1;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.  If WORD is const0_rtx,
         it is just an out-of-bounds access.  Ignore it.  */
      if (word != const0_rtx)
        store_fixed_bit_field (word, thissize, offset * unit + thispos,
                               bitregion_start, bitregion_end, part);
      bitsdone += thissize;
    }
}
/* A subroutine of extract_bit_field_1 that converts return value X
   to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
   to extract_bit_field.  */

static rtx
convert_extracted_bit_field (rtx x, enum machine_mode mode,
                             enum machine_mode tmode, bool unsignedp)
{
  if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
    return x;

  /* If the x mode is not a scalar integral, first convert to the
     integer mode of that size and then access it as a floating-point
     value via a SUBREG.  */
  if (!SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode smode;

      smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
      x = convert_to_mode (smode, x, unsignedp);
      x = force_reg (smode, x);
      return gen_lowpart (tmode, x);
    }

  return convert_to_mode (tmode, x, unsignedp);
}
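
/* For example (illustrative, not from the original sources): extracting
   an SFmode field first yields a 32-bit integer value; the force_reg
   plus gen_lowpart pair above reinterprets those bits as SFmode via a
   SUBREG rather than emitting any conversion instruction.  */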
/* Try to use an ext(z)v pattern to extract a field from OP0.
   Return the extracted value on success, otherwise return null.
   EXT_MODE is the mode of the extraction and the other arguments
   are as for extract_bit_field.  */

static rtx
extract_bit_field_using_extv (const extraction_insn *extv, rtx op0,
                              unsigned HOST_WIDE_INT bitsize,
                              unsigned HOST_WIDE_INT bitnum,
                              int unsignedp, rtx target,
                              enum machine_mode mode, enum machine_mode tmode)
{
  struct expand_operand ops[4];
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode ext_mode = extv->field_mode;
  unsigned unit = GET_MODE_BITSIZE (ext_mode);

  if (bitsize == 0 || unit < bitsize)
    return NULL_RTX;

  if (MEM_P (op0))
    /* Get a reference to the first byte of the field.  */
    op0 = narrow_bit_field_mem (op0, extv->struct_mode, bitsize, bitnum,
                                &bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in EXT_MODE.  */
      if (BYTES_BIG_ENDIAN)
        bitnum += unit - GET_MODE_BITSIZE (GET_MODE (op0));

      /* If op0 is a register, we need it in EXT_MODE to make it
         acceptable to the format of ext(z)v.  */
      if (GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
        return NULL_RTX;
      if (REG_P (op0) && GET_MODE (op0) != ext_mode)
        op0 = gen_lowpart_SUBREG (ext_mode, op0);
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are extracting from.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;

  if (target == 0)
    target = spec_target = gen_reg_rtx (tmode);

  if (GET_MODE (target) != ext_mode)
    {
      /* Don't use LHS paradoxical subreg if explicit truncation is needed
         between the mode of the extraction (word_mode) and the target
         mode.  Instead, create a temporary and use convert_move to set
         the target.  */
      if (REG_P (target)
          && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (target), ext_mode))
        {
          target = gen_lowpart (ext_mode, target);
          if (GET_MODE_PRECISION (ext_mode)
              > GET_MODE_PRECISION (GET_MODE (spec_target)))
            spec_target_subreg = target;
        }
      else
        target = gen_reg_rtx (ext_mode);
    }

  create_output_operand (&ops[0], target, ext_mode);
  create_fixed_operand (&ops[1], op0);
  create_integer_operand (&ops[2], bitsize);
  create_integer_operand (&ops[3], bitnum);
  if (maybe_expand_insn (extv->icode, 4, ops))
    {
      target = ops[0].value;
      if (target == spec_target)
        return target;
      if (target == spec_target_subreg)
        return spec_target;
      return convert_extracted_bit_field (target, mode, tmode, unsignedp);
    }
  return NULL_RTX;
}
/* A subroutine of extract_bit_field, with the same arguments.
   If FALLBACK_P is true, fall back to extract_fixed_bit_field
   if we can find no other means of implementing the operation.
   if FALLBACK_P is false, return NULL instead.  */

static rtx
extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                     unsigned HOST_WIDE_INT bitnum,
                     int unsignedp, bool packedp, rtx target,
                     enum machine_mode mode, enum machine_mode tmode,
                     bool fallback_p)
{
  rtx op0 = str_rtx;
  enum machine_mode int_mode;
  enum machine_mode mode1;

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* See if we can get a better vector mode before extracting.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && GET_MODE_INNER (GET_MODE (op0)) != tmode)
    {
      enum machine_mode new_mode;

      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        new_mode = MIN_MODE_VECTOR_FLOAT;
      else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
        new_mode = MIN_MODE_VECTOR_FRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
        new_mode = MIN_MODE_VECTOR_UFRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
        new_mode = MIN_MODE_VECTOR_ACCUM;
      else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
        new_mode = MIN_MODE_VECTOR_UACCUM;
      else
        new_mode = MIN_MODE_VECTOR_INT;

      for (; new_mode != VOIDmode; new_mode = GET_MODE_WIDER_MODE (new_mode))
        if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
            && targetm.vector_mode_supported_p (new_mode))
          break;
      if (new_mode != VOIDmode)
        op0 = gen_lowpart (new_mode, op0);
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
          == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      struct expand_operand ops[3];
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      enum insn_code icode = optab_handler (vec_extract_optab, outermode);
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);

      create_output_operand (&ops[0], target, innermode);
      create_input_operand (&ops[1], op0, outermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
        {
          target = ops[0].value;
          if (GET_MODE (target) != mode)
            return gen_lowpart (tmode, target);
          return target;
        }
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_bitfield_address_size (op0, imode, 0, MEM_SIZE (op0));
        else if (imode != BLKmode)
          {
            op0 = gen_lowpart (imode, op0);

            /* If we got a SUBREG, force it into a register since we
               aren't going to be able to do another SUBREG on it.  */
            if (GET_CODE (op0) == SUBREG)
              op0 = force_reg (imode, op0);
          }
        else if (REG_P (op0))
          {
            rtx reg, subreg;
            imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
                                            MODE_INT);
            reg = gen_reg_rtx (imode);
            subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
            emit_move_insn (subreg, op0);
            op0 = reg;
            bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
          }
        else
          {
            HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0));
            rtx mem = assign_stack_temp (GET_MODE (op0), size);
            emit_move_insn (mem, op0);
            op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
          }
      }
  }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If the bitfield is volatile, we need to make sure the access
     remains on a type-aligned boundary.  */
  if (GET_CODE (op0) == MEM
      && MEM_VOLATILE_P (op0)
      && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
      && flag_strict_volatile_bitfields > 0)
    goto no_subreg_mode_swap;

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = mode;
  if (SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode try_mode = mode_for_size (bitsize,
                                                  GET_MODE_CLASS (tmode), 0);
      if (try_mode != BLKmode)
        mode1 = try_mode;
    }
  gcc_assert (mode1 != BLKmode);

  /* Extraction of a full MODE1 value can be done with a subreg as long
     as the least significant bit of the value is the least significant
     bit of either OP0 or a word of OP0.  */
  if (!MEM_P (op0)
      && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (mode1)
      && TRULY_NOOP_TRUNCATION_MODES_P (mode1, GET_MODE (op0)))
    {
      rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
                                     bitnum / BITS_PER_UNIT);
      if (sub)
        return convert_extracted_bit_field (sub, mode, tmode, unsignedp);
    }

  /* Extraction of a full MODE1 value can be done with a load as long as
     the field is on a byte boundary and is sufficiently aligned.  */
  if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1))
    {
      op0 = adjust_bitfield_address (op0, mode1, bitnum / BITS_PER_UNIT);
      return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
    }

 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int backwards = WORDS_BIG_ENDIAN;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_clobber (target);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (backwards
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field_1 (op0, MIN (BITS_PER_WORD,
                                             bitsize - i * BITS_PER_WORD),
                                   bitnum + bit_offset, 1, false, target_part,
                                   mode, word_mode, fallback_p);

          gcc_assert (target_part);
          if (!result_part)
            {
              delete_insns_since (last);
              return NULL;
            }

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    backwards ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
    }

  /* If OP0 is a multi-word register, narrow it to the affected word.
     If the region spans two words, defer to extract_split_bit_field.  */
  if (!MEM_P (op0) && GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
    {
      op0 = simplify_gen_subreg (word_mode, op0, GET_MODE (op0),
                                 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
      bitnum %= BITS_PER_WORD;
      if (bitnum + bitsize > BITS_PER_WORD)
        {
          if (!fallback_p)
            return NULL_RTX;
          target = extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
          return convert_extracted_bit_field (target, mode, tmode, unsignedp);
        }
    }

  /* From here on we know the desired field is smaller than a word.
     If OP0 is a register, it too fits within a word.  */
  enum extraction_pattern pattern = unsignedp ? EP_extzv : EP_extv;
  extraction_insn extv;
  if (!MEM_P (op0)
      /* ??? We could limit the structure size to the part of OP0 that
         contains the field, with appropriate checks for endianness
         and TRULY_NOOP_TRUNCATION.  */
      && get_best_reg_extraction_insn (&extv, pattern,
                                       GET_MODE_BITSIZE (GET_MODE (op0)),
                                       tmode))
    {
      rtx result = extract_bit_field_using_extv (&extv, op0, bitsize, bitnum,
                                                 unsignedp, target, mode,
                                                 tmode);
      if (result)
        return result;
    }

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (MEM_P (op0))
    {
      /* Do not use extv/extzv for volatile bitfields when
         -fstrict-volatile-bitfields is in effect.  */
      if (!(MEM_VOLATILE_P (op0) && flag_strict_volatile_bitfields > 0)
          && get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
                                           tmode))
        {
          rtx result = extract_bit_field_using_extv (&extv, op0, bitsize,
                                                     bitnum, unsignedp,
                                                     target, mode,
                                                     tmode);
          if (result)
            return result;
        }

      rtx last = get_last_insn ();

      /* Try loading part of OP0 into a register and extracting the
         bitfield from that.  */
      unsigned HOST_WIDE_INT bitpos;
      rtx xop0 = adjust_bit_field_mem_for_reg (pattern, op0, bitsize, bitnum,
                                               0, 0, tmode, &bitpos);
      if (xop0)
        {
          xop0 = copy_to_reg (xop0);
          rtx result = extract_bit_field_1 (xop0, bitsize, bitpos,
                                            unsignedp, packedp, target,
                                            mode, tmode, false);
          if (result)
            return result;
          delete_insns_since (last);
        }
    }

  if (!fallback_p)
    return NULL;

  /* Find a correspondingly-sized integer field, so we can apply
     shifts and masks to it.  */
  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  /* Should probably push op0 out to memory and then do a load.  */
  gcc_assert (int_mode != BLKmode);

  target = extract_fixed_bit_field (int_mode, op0, bitsize, bitnum,
                                    target, unsignedp, packedp);
  return convert_extracted_bit_field (target, mode, tmode, unsignedp);
}
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   PACKEDP is nonzero if the field has the packed attribute.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, bool packedp,
                   rtx target, enum machine_mode mode, enum machine_mode tmode)
{
  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp, packedp,
                              target, mode, tmode, true);
}
1674 /* Use shifts and boolean operations to extract a field of BITSIZE bits
1675 from bit BITNUM of OP0.
1677 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1678 PACKEDP is true if the field has the packed attribute.
1680 If TARGET is nonzero, attempts to store the value there
1681 and return TARGET, but this is not guaranteed.
1682 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1685 extract_fixed_bit_field (enum machine_mode tmode
, rtx op0
,
1686 unsigned HOST_WIDE_INT bitsize
,
1687 unsigned HOST_WIDE_INT bitnum
, rtx target
,
1688 int unsignedp
, bool packedp
)
1690 enum machine_mode mode
;
1694 /* Get the proper mode to use for this field. We want a mode that
1695 includes the entire field. If such a mode would be larger than
1696 a word, we won't be doing the extraction the normal way. */
1698 if (MEM_VOLATILE_P (op0
)
1699 && flag_strict_volatile_bitfields
> 0)
1701 if (GET_MODE_BITSIZE (GET_MODE (op0
)) > 0)
1702 mode
= GET_MODE (op0
);
1703 else if (target
&& GET_MODE_BITSIZE (GET_MODE (target
)) > 0)
1704 mode
= GET_MODE (target
);
1709 mode
= get_best_mode (bitsize
, bitnum
, 0, 0,
1710 MEM_ALIGN (op0
), word_mode
, MEM_VOLATILE_P (op0
));
1712 if (mode
== VOIDmode
)
1713 /* The only way this should occur is if the field spans word
1715 return extract_split_bit_field (op0
, bitsize
, bitnum
, unsignedp
);
1717 unsigned int total_bits
= GET_MODE_BITSIZE (mode
);
1718 HOST_WIDE_INT bit_offset
= bitnum
- bitnum
% total_bits
;
1720 /* If we're accessing a volatile MEM, we can't apply BIT_OFFSET
1721 if it results in a multi-word access where we otherwise wouldn't
1722 have one. So, check for that case here. */
1724 && MEM_VOLATILE_P (op0
)
1725 && flag_strict_volatile_bitfields
> 0
1726 && bitnum
% BITS_PER_UNIT
+ bitsize
<= total_bits
1727 && bitnum
% GET_MODE_BITSIZE (mode
) + bitsize
> total_bits
)
1729 if (STRICT_ALIGNMENT
)
1731 static bool informed_about_misalignment
= false;
1735 if (bitsize
== total_bits
)
1736 warning_at (input_location
, OPT_fstrict_volatile_bitfields
,
1737 "multiple accesses to volatile structure"
1738 " member because of packed attribute");
1740 warning_at (input_location
, OPT_fstrict_volatile_bitfields
,
1741 "multiple accesses to volatile structure"
1742 " bitfield because of packed attribute");
1744 return extract_split_bit_field (op0
, bitsize
, bitnum
,
1748 if (bitsize
== total_bits
)
1749 warning_at (input_location
, OPT_fstrict_volatile_bitfields
,
1750 "mis-aligned access used for structure member");
1752 warning_at (input_location
, OPT_fstrict_volatile_bitfields
,
1753 "mis-aligned access used for structure bitfield");
1755 if (! informed_about_misalignment
)
1757 informed_about_misalignment
= true;
1758 inform (input_location
,
1759 "when a volatile object spans multiple type-sized"
1760 " locations, the compiler must choose between using"
1761 " a single mis-aligned access to preserve the"
1762 " volatility, or using multiple aligned accesses"
1763 " to avoid runtime faults; this code may fail at"
1764 " runtime if the hardware does not allow this"
1768 bit_offset
= bitnum
- bitnum
% BITS_PER_UNIT
;
1770 op0
= adjust_bitfield_address (op0
, mode
, bit_offset
/ BITS_PER_UNIT
);
1771 bitnum
-= bit_offset
;
1774 mode
= GET_MODE (op0
);
1775 gcc_assert (SCALAR_INT_MODE_P (mode
));
1777 /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
1778 for invalid input, such as extract equivalent of f5 from
1779 gcc.dg/pr48335-2.c. */
1781 if (BYTES_BIG_ENDIAN
)
1782 /* BITNUM is the distance between our msb and that of OP0.
1783 Convert it to the distance from the lsb. */
1784 bitnum
= GET_MODE_BITSIZE (mode
) - bitsize
- bitnum
;
1786 /* Now BITNUM is always the distance between the field's lsb and that of OP0.
1787 We have reduced the big-endian case to the little-endian case. */
1793 /* If the field does not already start at the lsb,
1794 shift it so it does. */
1795 /* Maybe propagate the target for the shift. */
1796 rtx subtarget
= (target
!= 0 && REG_P (target
) ? target
: 0);
1799 op0
= expand_shift (RSHIFT_EXPR
, mode
, op0
, bitnum
, subtarget
, 1);
1801 /* Convert the value to the desired mode. */
1803 op0
= convert_to_mode (tmode
, op0
, 1);
1805 /* Unless the msb of the field used to be the msb when we shifted,
1806 mask out the upper bits. */
1808 if (GET_MODE_BITSIZE (mode
) != bitnum
+ bitsize
)
1809 return expand_binop (GET_MODE (op0
), and_optab
, op0
,
1810 mask_rtx (GET_MODE (op0
), 0, bitsize
, 0),
1811 target
, 1, OPTAB_LIB_WIDEN
);

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitnum)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (mode != tmode)
    target = 0;

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitnum))
    {
      int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitnum);
      /* Maybe propagate the target for the shift.  */
      rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       GET_MODE_BITSIZE (mode) - bitsize, target, 0);
}
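
/* Illustrative sketch (not from the original source): the signed
   extraction above is the classic two-shift idiom.  A standalone C
   model for a 32-bit word; note that in C the left shift of a signed
   value and the arithmetic right shift are implementation-defined,
   whereas the RTL operations above are fully defined:  */
#if 0
static int
extract_signed_field_sketch (int word, int bitnum, int bitsize)
{
  /* Shift the field's msb up to bit 31, then arithmetic-shift its
     lsb down to bit 0, sign-extending on the way.  */
  return (word << (32 - (bitnum + bitsize))) >> (32 - bitsize);
}
#endif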

/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
{
  double_int mask;

  mask = double_int::mask (bitsize);
  mask = mask.llshift (bitpos, HOST_BITS_PER_DOUBLE_INT);

  if (complement)
    mask = ~mask;

  return immed_double_int_const (mask, mode);
}
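
/* Illustrative sketch (not from the original source): for a bit count
   strictly smaller than a host word, the mask built above is simply
   BITSIZE ones shifted left by BITPOS; the double_int version also
   copes with bitsize == HOST_BITS_PER_WIDE_INT, where the plain shift
   below would be undefined:  */
#if 0
static unsigned HOST_WIDE_INT
mask_sketch (int bitpos, int bitsize, int complement)
{
  unsigned HOST_WIDE_INT mask
    = ((((unsigned HOST_WIDE_INT) 1 << bitsize) - 1) << bitpos);
  return complement ? ~mask : mask;
}
#endif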

/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
{
  double_int val;

  val = double_int::from_uhwi (INTVAL (value)).zext (bitsize);
  val = val.llshift (bitpos, HOST_BITS_PER_DOUBLE_INT);

  return immed_double_int_const (val, mode);
}

/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.  */

static rtx
extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitpos, int unsignedp)
{
  unsigned int unit;
  unsigned int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 extract_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (REG_P (op0))
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* Extract the parts in bit-counting order,
	 whose meaning is determined by BYTES_PER_UNIT.
	 OFFSET is in UNITs, and UNIT is in bits.  */
      part = extract_fixed_bit_field (word_mode, word, thissize,
				      offset * unit + thispos, 0, 1, false);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
	{
	  if (bitsize != bitsdone)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 bitsize - bitsdone, 0, 1);
	}
      else
	{
	  if (bitsdone != thissize)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 bitsdone - thissize, 0, 1);
	}

      if (first)
	result = part;
      else
	/* Combine the parts with bitwise or.  This works
	   because we extracted each part as an unsigned bit field.  */
	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
			       OPTAB_LIB_WIDEN);
      first = 0;
    }

  if (unsignedp)
    /* Unsigned bit field: we are done.  */
    return result;

  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
			 BITS_PER_WORD - bitsize, NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
		       BITS_PER_WORD - bitsize, NULL_RTX, 0);
}
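
/* Illustrative sketch (not from the original source): on a
   little-endian target the loop above amounts to pulling each piece
   out unsigned, shifting it to its final position, and OR-ing.  For a
   field that straddles two 32-bit words starting BITPOS bits into the
   first one:  */
#if 0
static unsigned long long
extract_split_sketch (unsigned int word0, unsigned int word1,
		      int bitpos, int bitsize)
{
  int lo_bits = 32 - bitpos;	/* bits contributed by the first word */
  unsigned long long part0 = word0 >> bitpos;
  unsigned long long part1
    = word1 & ((1ULL << (bitsize - lo_bits)) - 1);
  return part0 | (part1 << lo_bits);
}
#endif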

/* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
   the bit pattern.  SRC_MODE is the mode of SRC; if this is smaller than
   MODE, fill the upper bits with zeros.  Fail if the layout of either
   mode is unknown (as for CC modes) or if the extraction would involve
   unprofitable mode punning.  Return the value on success, otherwise
   return null.

   This is different from gen_lowpart* in these respects:

     - the returned value must always be considered an rvalue

     - when MODE is wider than SRC_MODE, the extraction involves
       a zero extension

     - when MODE is smaller than SRC_MODE, the extraction involves
       a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).

   In other words, this routine performs a computation, whereas the
   gen_lowpart* routines are conceptually lvalue or rvalue subreg
   operations.  */

rtx
extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
{
  enum machine_mode int_mode, src_int_mode;

  if (mode == src_mode)
    return src;

  if (CONSTANT_P (src))
    {
      /* simplify_gen_subreg can't be used here, as if simplify_subreg
	 fails, it will happily create (subreg (symbol_ref)) or similar
	 invalid SUBREGs.  */
      unsigned int byte = subreg_lowpart_offset (mode, src_mode);
      rtx ret = simplify_subreg (mode, src, src_mode, byte);
      if (ret)
	return ret;

      if (GET_MODE (src) == VOIDmode
	  || !validate_subreg (mode, src_mode, src, byte))
	return NULL_RTX;

      src = force_reg (GET_MODE (src), src);
      return gen_rtx_SUBREG (mode, src, byte);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
    return NULL_RTX;

  if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
      && MODES_TIEABLE_P (mode, src_mode))
    {
      rtx x = gen_lowpart_common (mode, src);
      if (x)
	return x;
    }

  src_int_mode = int_mode_for_mode (src_mode);
  int_mode = int_mode_for_mode (mode);
  if (src_int_mode == BLKmode || int_mode == BLKmode)
    return NULL_RTX;

  if (!MODES_TIEABLE_P (src_int_mode, src_mode))
    return NULL_RTX;
  if (!MODES_TIEABLE_P (int_mode, mode))
    return NULL_RTX;

  src = gen_lowpart (src_int_mode, src);
  src = convert_modes (int_mode, src_int_mode, src, true);
  src = gen_lowpart (mode, src);
  return src;
}

/* Add INC into TARGET.  */

void
expand_inc (rtx target, rtx inc)
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
			    target, inc,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (rtx target, rtx dec)
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
			    target, dec,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the rtx for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

static rtx
expand_shift_1 (enum tree_code code, enum machine_mode mode, rtx shifted,
		rtx amount, rtx target, int unsignedp)
{
  rtx op1, temp = 0;
  int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  optab lshift_optab = ashl_optab;
  optab rshift_arith_optab = ashr_optab;
  optab rshift_uns_optab = lshr_optab;
  optab lrotate_optab = rotl_optab;
  optab rrotate_optab = rotr_optab;
  enum machine_mode op1_mode;
  int attempt;
  bool speed = optimize_insn_for_speed_p ();

  op1 = amount;
  op1_mode = GET_MODE (op1);

  /* Determine whether the shift/rotate amount is a vector, or scalar.  If the
     shift amount is a vector, use the vector/vector shift patterns.  */
  if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
    {
      lshift_optab = vashl_optab;
      rshift_arith_optab = vashr_optab;
      rshift_uns_optab = vlshr_optab;
      lrotate_optab = vrotl_optab;
      rrotate_optab = vrotr_optab;
    }

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  if (SHIFT_COUNT_TRUNCATED)
    {
      if (CONST_INT_P (op1)
	  && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
	      (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
	op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
		       % GET_MODE_BITSIZE (mode));
      else if (GET_CODE (op1) == SUBREG
	       && subreg_lowpart_p (op1)
	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op1)))
	       && SCALAR_INT_MODE_P (GET_MODE (op1)))
	op1 = SUBREG_REG (op1);
    }

  /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
     prefer left rotation, if op1 is from bitsize / 2 + 1 to
     bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
     amount instead.  */
  if (rotate
      && CONST_INT_P (op1)
      && IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (mode) / 2 + left,
		   GET_MODE_BITSIZE (mode) - 1))
    {
      op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
      left = !left;
      code = left ? LROTATE_EXPR : RROTATE_EXPR;
    }

  if (op1 == const0_rtx)
    return shifted;

  /* Check whether it's cheaper to implement a left shift by a constant
     bit count by a sequence of additions.  */
  if (code == LSHIFT_EXPR
      && CONST_INT_P (op1)
      && INTVAL (op1) > 0
      && INTVAL (op1) < GET_MODE_PRECISION (mode)
      && INTVAL (op1) < MAX_BITS_PER_WORD
      && (shift_cost (speed, mode, INTVAL (op1))
	  > INTVAL (op1) * add_cost (speed, mode))
      && shift_cost (speed, mode, INTVAL (op1)) != MAX_COST)
    {
      int i;
      for (i = 0; i < INTVAL (op1); i++)
	{
	  temp = force_reg (mode, shifted);
	  shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
				  unsignedp, OPTAB_LIB_WIDEN);
	}
      return shifted;
    }

  for (attempt = 0; temp == 0 && attempt < 3; attempt++)
    {
      enum optab_methods methods;

      if (attempt == 0)
	methods = OPTAB_DIRECT;
      else if (attempt == 1)
	methods = OPTAB_WIDEN;
      else
	methods = OPTAB_LIB_WIDEN;

      if (rotate)
	{
	  /* Widening does not work for rotation.  */
	  if (methods == OPTAB_WIDEN)
	    continue;
	  else if (methods == OPTAB_LIB_WIDEN)
	    {
	      /* If we have been unable to open-code this by a rotation,
		 do it as the IOR of two shifts.  I.e., to rotate A
		 by N bits, compute
		 (A << N) | ((unsigned) A >> ((-N) & (C - 1)))
		 where C is the bitsize of A.

		 It is theoretically possible that the target machine might
		 not be able to perform either shift and hence we would
		 be making two libcalls rather than just the one for the
		 shift (similarly if IOR could not be done).  We will allow
		 this extremely unlikely lossage to avoid complicating the
		 code.  */

	      rtx subtarget = target == shifted ? 0 : target;
	      rtx new_amount, other_amount;
	      rtx temp1;

	      new_amount = op1;
	      if (op1 == const0_rtx)
		other_amount = op1;
	      else if (CONST_INT_P (op1))
		other_amount = GEN_INT (GET_MODE_BITSIZE (mode)
					- INTVAL (op1));
	      else
		{
		  other_amount
		    = simplify_gen_unary (NEG, GET_MODE (op1),
					  op1, GET_MODE (op1));
		  other_amount
		    = simplify_gen_binary (AND, GET_MODE (op1), other_amount,
					   GEN_INT (GET_MODE_PRECISION (mode)
						    - 1));
		}

	      shifted = force_reg (mode, shifted);

	      temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
				     mode, shifted, new_amount, 0, 1);
	      temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
				      mode, shifted, other_amount,
				      subtarget, 1);
	      return expand_binop (mode, ior_optab, temp, temp1, target,
				   unsignedp, methods);
	    }

	  temp = expand_binop (mode,
			       left ? lrotate_optab : rrotate_optab,
			       shifted, op1, target, unsignedp, methods);
	}
      else if (unsignedp)
	temp = expand_binop (mode,
			     left ? lshift_optab : rshift_uns_optab,
			     shifted, op1, target, unsignedp, methods);

      /* Do arithmetic shifts.
	 Also, if we are going to widen the operand, we can just as well
	 use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
	  && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
	{
	  enum optab_methods methods1 = methods;

	  /* If trying to widen a log shift to an arithmetic shift,
	     don't accept an arithmetic shift of the same size.  */
	  if (unsignedp)
	    methods1 = OPTAB_MUST_WIDEN;

	  /* Arithmetic shift */

	  temp = expand_binop (mode,
			       left ? lshift_optab : rshift_arith_optab,
			       shifted, op1, target, unsignedp, methods1);
	}

      /* We used to try extzv here for logical right shifts, but that was
	 only useful for one machine, the VAX, and caused poor code
	 generation there for lshrdi3, so the code was deleted and a
	 define_expand for lshrsi3 was added to vax.md.  */
    }

  gcc_assert (temp);
  return temp;
}
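
/* Illustrative sketch (not from the original source): the open-coded
   rotate fallback above computes
   (A << N) | ((unsigned) A >> ((-N) & (C - 1))).  A 32-bit model in
   plain C; masking both counts with C - 1 keeps N == 0 from producing
   an undefined shift by the full width:  */
#if 0
static unsigned int
rotl32_sketch (unsigned int a, unsigned int n)
{
  return (a << (n & 31)) | (a >> ((-n) & 31));
}
#endif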

/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
	      int amount, rtx target, int unsignedp)
{
  return expand_shift_1 (code, mode,
			 shifted, GEN_INT (amount), target, unsignedp);
}

/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_variable_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
		       tree amount, rtx target, int unsignedp)
{
  return expand_shift_1 (code, mode,
			 shifted, expand_normal (amount), target, unsignedp);
}

/* Indicates the type of fixup needed after a constant multiplication.
   BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
   the result should be negated, and ADD_VARIANT means that the
   multiplicand should be added to the result.  */
enum mult_variant {basic_variant, negate_variant, add_variant};

static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
			const struct mult_cost *, enum machine_mode mode);
static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
				 struct algorithm *, enum mult_variant *, int);
static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
			      const struct algorithm *, enum mult_variant);
static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
static rtx extract_high_half (enum machine_mode, rtx);
static rtx expmed_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
static rtx expmed_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
				       int, int);

/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.
   MODE is the machine mode of the multiplication.  */

static void
synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
	    const struct mult_cost *cost_limit, enum machine_mode mode)
{
  int m;
  struct algorithm *alg_in, *best_alg;
  struct mult_cost best_cost;
  struct mult_cost new_limit;
  int op_cost, op_latency;
  unsigned HOST_WIDE_INT orig_t = t;
  unsigned HOST_WIDE_INT q;
  int maxm, hash_index;
  bool cache_hit = false;
  enum alg_code cache_alg = alg_zero;
  bool speed = optimize_insn_for_speed_p ();
  enum machine_mode imode;
  struct alg_hash_entry *entry_ptr;

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost.cost = cost_limit->cost + 1;
  alg_out->cost.latency = cost_limit->latency + 1;

  if (cost_limit->cost < 0
      || (cost_limit->cost == 0 && cost_limit->latency <= 0))
    return;

  /* Be prepared for vector modes.  */
  imode = GET_MODE_INNER (mode);
  if (imode == VOIDmode)
    imode = mode;

  maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (imode));

  /* Restrict the bits of "t" to the multiplication's mode.  */
  t &= GET_MODE_MASK (imode);

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost.cost = 0;
      alg_out->cost.latency = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (MULT_COST_LESS (cost_limit, zero_cost (speed)))
	return;
      else
	{
	  alg_out->ops = 1;
	  alg_out->cost.cost = zero_cost (speed);
	  alg_out->cost.latency = zero_cost (speed);
	  alg_out->op[0] = alg_zero;
	  return;
	}
    }

  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = XALLOCA (struct algorithm);
  best_alg = XALLOCA (struct algorithm);
  best_cost = *cost_limit;

  /* Compute the hash index.  */
  hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;

  /* See if we already know what to do for T.  */
  entry_ptr = alg_hash_entry_ptr (hash_index);
  if (entry_ptr->t == t
      && entry_ptr->mode == mode
      && entry_ptr->speed == speed
      && entry_ptr->alg != alg_unknown)
    {
      cache_alg = entry_ptr->alg;

      if (cache_alg == alg_impossible)
	{
	  /* The cache tells us that it's impossible to synthesize
	     multiplication by T within entry_ptr->cost.  */
	  if (!CHEAPER_MULT_COST (&entry_ptr->cost, cost_limit))
	    /* COST_LIMIT is at least as restrictive as the one
	       recorded in the hash table, in which case we have no
	       hope of synthesizing a multiplication.  Just
	       return.  */
	    return;

	  /* If we get here, COST_LIMIT is less restrictive than the
	     one recorded in the hash table, so we may be able to
	     synthesize a multiplication.  Proceed as if we didn't
	     have the cache entry.  */
	}
      else
	{
	  if (CHEAPER_MULT_COST (cost_limit, &entry_ptr->cost))
	    /* The cached algorithm shows that this multiplication
	       requires more cost than COST_LIMIT.  Just return.  This
	       way, we don't clobber this cache entry with
	       alg_impossible but retain useful information.  */
	    return;

	  cache_hit = true;

	  switch (cache_alg)
	    {
	    case alg_shift:
	      goto do_alg_shift;

	    case alg_add_t_m2:
	    case alg_sub_t_m2:
	      goto do_alg_addsub_t_m2;

	    case alg_add_factor:
	    case alg_sub_factor:
	      goto do_alg_addsub_factor;

	    case alg_add_t2_m:
	      goto do_alg_add_t2_m;

	    case alg_sub_t2_m:
	      goto do_alg_sub_t2_m;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */

  if ((t & 1) == 0)
    {
    do_alg_shift:
      m = floor_log2 (t & -t);	/* m = number of low zero bits */
      if (m < maxm)
	{
	  q = t >> m;
	  /* The function expand_shift will choose between a shift and
	     a sequence of additions, so the observed cost is given as
	     MIN (m * add_cost(speed, mode), shift_cost(speed, mode, m)).  */
	  op_cost = m * add_cost (speed, mode);
	  if (shift_cost (speed, mode, m) < op_cost)
	    op_cost = shift_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, q, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_shift;
	    }

	  /* See if treating ORIG_T as a signed number yields a better
	     sequence.  Try this sequence only for a negative ORIG_T
	     as it would be useless for a non-negative ORIG_T.  */
	  if ((HOST_WIDE_INT) orig_t < 0)
	    {
	      /* Shift ORIG_T as follows because a right shift of a
		 negative-valued signed type is implementation
		 defined.  */
	      q = ~(~orig_t >> m);
	      /* The function expand_shift will choose between a shift
		 and a sequence of additions, so the observed cost is
		 given as MIN (m * add_cost(speed, mode),
		 shift_cost(speed, mode, m)).  */
	      op_cost = m * add_cost (speed, mode);
	      if (shift_cost (speed, mode, m) < op_cost)
		op_cost = shift_cost (speed, mode, m);
	      new_limit.cost = best_cost.cost - op_cost;
	      new_limit.latency = best_cost.latency - op_cost;
	      synth_mult (alg_in, q, &new_limit, mode);

	      alg_in->cost.cost += op_cost;
	      alg_in->cost.latency += op_cost;
	      if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
		{
		  struct algorithm *x;
		  best_cost = alg_in->cost;
		  x = alg_in, alg_in = best_alg, best_alg = x;
		  best_alg->log[best_alg->ops] = m;
		  best_alg->op[best_alg->ops] = alg_shift;
		}
	    }
	}
      if (cache_hit)
	goto done;
    }

  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

    do_alg_addsub_t_m2:
      for (w = 1; (w & t) != 0; w <<= 1)
	;
      /* If T was -1, then W will be zero after the loop.  This is another
	 case where T ends with ...111.  Handling this with (T + 1) and
	 subtract 1 produces slightly better code and results in algorithm
	 selection much faster than treating it like the ...0111 case
	 below.  */
      if (w == 0
	  || (w > 2
	      /* Reject the case where t is 3.
		 Thus we prefer addition in that case.  */
	      && t != 3))
	{
	  /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */

	  op_cost = add_cost (speed, mode);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t + 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	    }
	}
      else
	{
	  /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */

	  op_cost = add_cost (speed, mode);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t - 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_add_t_m2;
	    }
	}

      /* We may be able to calculate a * -7, a * -15, a * -31, etc
	 quickly with a - a * n for some appropriate constant n.  */
      m = exact_log2 (-orig_t + 1);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftsub1_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
		      &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	    }
	}

      if (cache_hit)
	goto done;
    }

  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shift the result by m and add/subtract it to itself.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */

 do_alg_addsub_factor:
  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_add_factor))
	{
	  /* If the target has a cheap shift-and-add instruction use
	     that in preference to a shift insn followed by an add insn.
	     Assume that the shift-and-add is "atomic" with a latency
	     equal to its cost, otherwise assume that on superscalar
	     hardware the shift may be executed concurrently with the
	     earlier steps in the algorithm.  */
	  op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
	  if (shiftadd_cost (speed, mode, m) < op_cost)
	    {
	      op_cost = shiftadd_cost (speed, mode, m);
	      op_latency = op_cost;
	    }
	  else
	    op_latency = add_cost (speed, mode);

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_factor;
	    }
	  /* Other factors will have been taken care of in the recursion.  */
	  break;
	}

      d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_sub_factor))
	{
	  /* If the target has a cheap shift-and-subtract insn use
	     that in preference to a shift insn followed by a sub insn.
	     Assume that the shift-and-sub is "atomic" with a latency
	     equal to its cost, otherwise assume that on superscalar
	     hardware the shift may be executed concurrently with the
	     earlier steps in the algorithm.  */
	  op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
	  if (shiftsub0_cost (speed, mode, m) < op_cost)
	    {
	      op_cost = shiftsub0_cost (speed, mode, m);
	      op_latency = op_cost;
	    }
	  else
	    op_latency = add_cost (speed, mode);

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_factor;
	    }
	  break;
	}
    }
  if (cache_hit)
    goto done;

  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
    do_alg_add_t2_m:
      q = t - 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftadd_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_t2_m;
	    }
	}
      if (cache_hit)
	goto done;

    do_alg_sub_t2_m:
      q = t + 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftsub0_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t2_m;
	    }
	}
      if (cache_hit)
	goto done;
    }

 done:
  /* If best_cost has not decreased, we have not found any algorithm.  */
  if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
    {
      /* We failed to find an algorithm.  Record alg_impossible for
	 this case (that is, <T, MODE, COST_LIMIT>) so that next time
	 we are asked to find an algorithm for T within the same or
	 lower COST_LIMIT, we can immediately return to the
	 caller.  */
      entry_ptr->t = t;
      entry_ptr->mode = mode;
      entry_ptr->speed = speed;
      entry_ptr->alg = alg_impossible;
      entry_ptr->cost = *cost_limit;
      return;
    }

  /* Cache the result.  */
  if (!cache_hit)
    {
      entry_ptr->t = t;
      entry_ptr->mode = mode;
      entry_ptr->speed = speed;
      entry_ptr->alg = best_alg->op[best_alg->ops];
      entry_ptr->cost.cost = best_cost.cost;
      entry_ptr->cost.latency = best_cost.latency;
    }

  /* If we are getting a too long sequence for `struct algorithm'
     to record, make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = best_cost;
  memcpy (alg_out->op, best_alg->op,
	  alg_out->ops * sizeof *alg_out->op);
  memcpy (alg_out->log, best_alg->log,
	  alg_out->ops * sizeof *alg_out->log);
}
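
/* Illustrative example (not from the original source): for t = 10 the
   search above can return a two-operation sequence, multiply by 5
   with a shift-and-add and then shift left once:  */
#if 0
static int
mul10_sketch (int x)
{
  int acc = (x << 2) + x;	/* alg_add_t2_m: acc = 5*x */
  return acc << 1;		/* alg_shift:    acc = 10*x */
}
#endif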

/* Find the cheapest way of multiplying a value of mode MODE by VAL.
   Try three variations:

       - a shift/add sequence based on VAL itself
       - a shift/add sequence based on -VAL, followed by a negation
       - a shift/add sequence based on VAL - 1, followed by an addition.

   Return true if the cheapest of these cost less than MULT_COST,
   describing the algorithm in *ALG and final fixup in *VARIANT.  */

static bool
choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
		     struct algorithm *alg, enum mult_variant *variant,
		     int mult_cost)
{
  struct algorithm alg2;
  struct mult_cost limit;
  int op_cost;
  bool speed = optimize_insn_for_speed_p ();

  /* Fail quickly for impossible bounds.  */
  if (mult_cost < 0)
    return false;

  /* Ensure that mult_cost provides a reasonable upper bound.
     Any constant multiplication can be performed with less
     than 2 * bits additions.  */
  op_cost = 2 * GET_MODE_UNIT_BITSIZE (mode) * add_cost (speed, mode);
  if (mult_cost > op_cost)
    mult_cost = op_cost;

  *variant = basic_variant;
  limit.cost = mult_cost;
  limit.latency = mult_cost;
  synth_mult (alg, val, &limit, mode);

  /* This works only if the inverted value actually fits in an
     `unsigned int' */
  if (HOST_BITS_PER_INT >= GET_MODE_UNIT_BITSIZE (mode))
    {
      op_cost = neg_cost (speed, mode);
      if (MULT_COST_LESS (&alg->cost, mult_cost))
	{
	  limit.cost = alg->cost.cost - op_cost;
	  limit.latency = alg->cost.latency - op_cost;
	}
      else
	{
	  limit.cost = mult_cost - op_cost;
	  limit.latency = mult_cost - op_cost;
	}

      synth_mult (&alg2, -val, &limit, mode);
      alg2.cost.cost += op_cost;
      alg2.cost.latency += op_cost;
      if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
	*alg = alg2, *variant = negate_variant;
    }

  /* This proves very useful for division-by-constant.  */
  op_cost = add_cost (speed, mode);
  if (MULT_COST_LESS (&alg->cost, mult_cost))
    {
      limit.cost = alg->cost.cost - op_cost;
      limit.latency = alg->cost.latency - op_cost;
    }
  else
    {
      limit.cost = mult_cost - op_cost;
      limit.latency = mult_cost - op_cost;
    }

  synth_mult (&alg2, val - 1, &limit, mode);
  alg2.cost.cost += op_cost;
  alg2.cost.latency += op_cost;
  if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
    *alg = alg2, *variant = add_variant;

  return MULT_COST_LESS (&alg->cost, mult_cost);
}

/* A subroutine of expand_mult, used for constant multiplications.
   Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
   convenient.  Use the shift/add sequence described by ALG and apply
   the final fixup specified by VARIANT.  */

static rtx
expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
		   rtx target, const struct algorithm *alg,
		   enum mult_variant variant)
{
  HOST_WIDE_INT val_so_far;
  rtx insn, accum, tem;
  int opno;
  enum machine_mode nmode;

  /* Avoid referencing memory over and over and invalid sharing
     on SUBREGs.  */
  op0 = force_reg (mode, op0);

  /* ACCUM starts out either as OP0 or as a zero, depending on
     the first operation.  */

  if (alg->op[0] == alg_zero)
    {
      accum = copy_to_mode_reg (mode, CONST0_RTX (mode));
      val_so_far = 0;
    }
  else if (alg->op[0] == alg_m)
    {
      accum = copy_to_mode_reg (mode, op0);
      val_so_far = 1;
    }
  else
    gcc_unreachable ();

  for (opno = 1; opno < alg->ops; opno++)
    {
      int log = alg->log[opno];
      rtx shift_subtarget = optimize ? 0 : accum;
      rtx add_target
	= (opno == alg->ops - 1 && target != 0 && variant != add_variant
	   && !optimize) ? target : 0;
      rtx accum_target = optimize ? 0 : accum;
      rtx accum_inner;

      switch (alg->op[opno])
	{
	case alg_shift:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
	  /* REG_EQUAL note will be attached to the following insn.  */
	  emit_move_insn (accum, tem);
	  val_so_far <<= log;
	  break;

	case alg_add_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_sub_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far -= (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_add_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				log, shift_subtarget, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) + 1;
	  break;

	case alg_sub_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				log, shift_subtarget, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) - 1;
	  break;

	case alg_add_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += val_so_far << log;
	  break;

	case alg_sub_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
				 (add_target
				  ? add_target : (optimize ? 0 : tem)));
	  val_so_far = (val_so_far << log) - val_so_far;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (SCALAR_INT_MODE_P (mode))
	{
	  /* Write a REG_EQUAL note on the last insn so that we can cse
	     multiplication sequences.  Note that if ACCUM is a SUBREG,
	     we've set the inner register and must properly indicate that.  */
	  tem = op0, nmode = mode;
	  accum_inner = accum;
	  if (GET_CODE (accum) == SUBREG)
	    {
	      accum_inner = SUBREG_REG (accum);
	      nmode = GET_MODE (accum_inner);
	      tem = gen_lowpart (nmode, op0);
	    }

	  insn = get_last_insn ();
	  set_dst_reg_note (insn, REG_EQUAL,
			    gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)),
			    accum_inner);
	}
    }

  if (variant == negate_variant)
    {
      val_so_far = -val_so_far;
      accum = expand_unop (mode, neg_optab, accum, target, 0);
    }
  else if (variant == add_variant)
    {
      val_so_far = val_so_far + 1;
      accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
    }

  /* Compare only the bits of val and val_so_far that are significant
     in the result mode, to avoid sign-/zero-extension confusion.  */
  nmode = GET_MODE_INNER (mode);
  if (nmode == VOIDmode)
    nmode = mode;
  val &= GET_MODE_MASK (nmode);
  val_so_far &= GET_MODE_MASK (nmode);
  gcc_assert (val == val_so_far);

  return accum;
}

/* Perform a multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).

   We check specially for a constant integer as OP1.
   If you want this check for OP0 as well, then before calling
   you should swap the two operands if OP0 would be constant.  */

rtx
expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
	     int unsignedp)
{
  enum mult_variant variant;
  struct algorithm algorithm;
  rtx scalar_op1, fake_reg;
  int max_cost;
  bool speed = optimize_insn_for_speed_p ();
  bool do_trapv = flag_trapv && SCALAR_INT_MODE_P (mode) && !unsignedp;

  if (CONSTANT_P (op0))
    {
      rtx temp = op0;
      op0 = op1;
      op1 = temp;
    }

  /* For vectors, there are several simplifications that can be made if
     all elements of the vector constant are identical.  */
  scalar_op1 = op1;
  if (GET_CODE (op1) == CONST_VECTOR)
    {
      int i, n = CONST_VECTOR_NUNITS (op1);
      scalar_op1 = CONST_VECTOR_ELT (op1, 0);
      for (i = 1; i < n; ++i)
	if (!rtx_equal_p (scalar_op1, CONST_VECTOR_ELT (op1, i)))
	  goto skip_scalar;
    }

  if (INTEGRAL_MODE_P (mode))
    {
      HOST_WIDE_INT coeff;
      bool is_neg;
      int mode_bitsize;

      if (op1 == CONST0_RTX (mode))
	return op1;
      if (op1 == CONST1_RTX (mode))
	return op0;
      if (op1 == CONSTM1_RTX (mode))
	return expand_unop (mode, do_trapv ? negv_optab : neg_optab,
			    op0, target, 0);

      if (do_trapv)
	goto skip_synth;

      /* These are the operations that are potentially turned into
	 a sequence of shifts and additions.  */
      mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);

      /* synth_mult does an `unsigned int' multiply.  As long as the mode is
	 less than or equal in size to `unsigned int' this doesn't matter.
	 If the mode is larger than `unsigned int', then synth_mult works
	 only if the constant value exactly fits in an `unsigned int' without
	 any truncation.  This means that multiplying by negative values does
	 not work; results are off by 2^32 on a 32 bit machine.  */

      if (CONST_INT_P (scalar_op1))
	{
	  coeff = INTVAL (scalar_op1);
	  is_neg = coeff < 0;
	}
      else if (CONST_DOUBLE_AS_INT_P (scalar_op1))
	{
	  /* If we are multiplying in DImode, it may still be a win
	     to try to work with shifts and adds.  */
	  if (CONST_DOUBLE_HIGH (scalar_op1) == 0
	      && (CONST_DOUBLE_LOW (scalar_op1) > 0
		  || (CONST_DOUBLE_LOW (scalar_op1) < 0
		      && EXACT_POWER_OF_2_OR_ZERO_P
			   (CONST_DOUBLE_LOW (scalar_op1)))))
	    {
	      coeff = CONST_DOUBLE_LOW (scalar_op1);
	      is_neg = false;
	    }
	  else if (CONST_DOUBLE_LOW (scalar_op1) == 0)
	    {
	      coeff = CONST_DOUBLE_HIGH (scalar_op1);
	      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
		{
		  int shift = floor_log2 (coeff) + HOST_BITS_PER_WIDE_INT;
		  if (shift < HOST_BITS_PER_DOUBLE_INT - 1
		      || mode_bitsize <= HOST_BITS_PER_DOUBLE_INT)
		    return expand_shift (LSHIFT_EXPR, mode, op0,
					 shift, target, unsignedp);
		}
	      goto skip_synth;
	    }
	  else
	    goto skip_synth;
	}
      else
	goto skip_synth;

      /* We used to test optimize here, on the grounds that it's better to
	 produce a smaller program when -O is not used.  But this causes
	 such a terrible slowdown sometimes that it seems better to always
	 use synth_mult.  */

      /* Special case powers of two.  */
      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff)
	  && !(is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT))
	return expand_shift (LSHIFT_EXPR, mode, op0,
			     floor_log2 (coeff), target, unsignedp);

      fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);

      /* Attempt to handle multiplication of DImode values by negative
	 coefficients, by performing the multiplication by a positive
	 multiplier and then inverting the result.  */
      if (is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT)
	{
	  /* It's safe to use -coeff even for INT_MIN, as the
	     result is interpreted as an unsigned coefficient.
	     Exclude cost of op0 from max_cost to match the cost
	     calculation of the synth_mult.  */
	  coeff = -(unsigned HOST_WIDE_INT) coeff;
	  max_cost = (set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), speed)
		      - neg_cost (speed, mode));
	  if (max_cost <= 0)
	    goto skip_synth;

	  /* Special case powers of two.  */
	  if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
	    {
	      rtx temp = expand_shift (LSHIFT_EXPR, mode, op0,
				       floor_log2 (coeff), target, unsignedp);
	      return expand_unop (mode, neg_optab, temp, target, 0);
	    }

	  if (choose_mult_variant (mode, coeff, &algorithm, &variant,
				   max_cost))
	    {
	      rtx temp = expand_mult_const (mode, op0, coeff, NULL_RTX,
					    &algorithm, variant);
	      return expand_unop (mode, neg_optab, temp, target, 0);
	    }
	  goto skip_synth;
	}

      /* Exclude cost of op0 from max_cost to match the cost
	 calculation of the synth_mult.  */
      max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), speed);
      if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
	return expand_mult_const (mode, op0, coeff, target,
				  &algorithm, variant);
    }
 skip_synth:

  /* Expand x*2.0 as x+x.  */
  if (CONST_DOUBLE_AS_FLOAT_P (scalar_op1))
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, scalar_op1);

      if (REAL_VALUES_EQUAL (d, dconst2))
	{
	  op0 = force_reg (GET_MODE (op0), op0);
	  return expand_binop (mode, add_optab, op0, op0,
			       target, unsignedp, OPTAB_LIB_WIDEN);
	}
    }
 skip_scalar:

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab,
		      op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
  gcc_assert (op0);
  return op0;
}

/* Return a cost estimate for multiplying a register by the given
   COEFFicient in the given MODE and SPEED.  */

int
mult_by_coeff_cost (HOST_WIDE_INT coeff, enum machine_mode mode, bool speed)
{
  int max_cost;
  struct algorithm algorithm;
  enum mult_variant variant;

  rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
  max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, fake_reg), speed);
  if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
    return algorithm.cost.cost;
  else
    return max_cost;
}

/* Perform a widening multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).
   THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
   or smul_widen_optab.

   We check specially for a constant integer as OP1, comparing the
   cost of a widening multiply against the cost of a sequence of shifts
   and adds.  */

rtx
expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
		      int unsignedp, optab this_optab)
{
  bool speed = optimize_insn_for_speed_p ();
  rtx cop1;

  if (CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
				this_optab == umul_widen_optab))
      && CONST_INT_P (cop1)
      && (INTVAL (cop1) >= 0
	  || HWI_COMPUTABLE_MODE_P (mode)))
    {
      HOST_WIDE_INT coeff = INTVAL (cop1);
      int max_cost;
      enum mult_variant variant;
      struct algorithm algorithm;

      /* Special case powers of two.  */
      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
	{
	  op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
	  return expand_shift (LSHIFT_EXPR, mode, op0,
			       floor_log2 (coeff), target, unsignedp);
	}

      /* Exclude cost of op0 from max_cost to match the cost
	 calculation of the synth_mult.  */
      max_cost = mul_widen_cost (speed, mode);
      if (choose_mult_variant (mode, coeff, &algorithm, &variant,
			       max_cost))
	{
	  op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
	  return expand_mult_const (mode, op0, coeff, target,
				    &algorithm, variant);
	}
    }
  return expand_binop (mode, this_optab, op0, op1, target,
		       unsignedp, OPTAB_LIB_WIDEN);
}

/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose
   multiplier more freely.

   The rounded-up logarithm of D is placed in *lgup_ptr.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */

unsigned HOST_WIDE_INT
choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
		   unsigned HOST_WIDE_INT *multiplier_ptr,
		   int *post_shift_ptr, int *lgup_ptr)
{
  double_int mhigh, mlow;
  int lgup, post_shift;
  int pow, pow2;

  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  gcc_assert (lgup <= n);

  pow = n + lgup;
  pow2 = n + lgup - precision;

  /* We could handle this with some effort, but this case is much
     better handled directly with a scc insn, so rely on caller using
     that.  */
  gcc_assert (pow != HOST_BITS_PER_DOUBLE_INT);

  /* mlow = 2^(N + lgup)/d */
  double_int val = double_int_zero.set_bit (pow);
  mlow = val.div (double_int::from_uhwi (d), true, TRUNC_DIV_EXPR);

  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
  val |= double_int_zero.set_bit (pow2);
  mhigh = val.div (double_int::from_uhwi (d), true, TRUNC_DIV_EXPR);

  gcc_assert (!mhigh.high || val.high - d < d);
  gcc_assert (mhigh.high <= 1 && mlow.high <= 1);
  /* Assert that mlow < mhigh.  */
  gcc_assert (mlow.ult (mhigh));

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms.  */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      int shft = HOST_BITS_PER_WIDE_INT - 1;
      unsigned HOST_WIDE_INT ml_lo = (mlow.high << shft) | (mlow.low >> 1);
      unsigned HOST_WIDE_INT mh_lo = (mhigh.high << shft) | (mhigh.low >> 1);
      if (ml_lo >= mh_lo)
	break;

      mlow = double_int::from_uhwi (ml_lo);
      mhigh = double_int::from_uhwi (mh_lo);
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
      *multiplier_ptr = mhigh.low & mask;
      return mhigh.low >= mask;
    }
  else
    {
      *multiplier_ptr = mhigh.low;
      return mhigh.high;
    }
}
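
/* Illustrative example (not from the original source): the multiplier
   chosen above turns a division by a constant into a widening multiply
   and a right shift.  For 32-bit unsigned division by 5 the classic
   pair is m = 0xCCCCCCCD with a post-shift of 2, i.e. a total shift of
   34 applied to the 64-bit product:  */
#if 0
static unsigned int
udiv5_sketch (unsigned int x)
{
  /* x / 5 == (x * 0xCCCCCCCD) >> 34 for all 32-bit x.  */
  return (unsigned int) (((unsigned long long) x * 0xCCCCCCCDULL) >> 34);
}
#endif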

/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */

static unsigned HOST_WIDE_INT
invert_mod2n (unsigned HOST_WIDE_INT x, int n)
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;
  int nbit = 3;

  mask = (n == HOST_BITS_PER_WIDE_INT
	  ? ~(unsigned HOST_WIDE_INT) 0
	  : ((unsigned HOST_WIDE_INT) 1 << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;		/* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
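
/* Illustrative sketch (not from the original source): each
   y = y * (2 - x*y) step is a Newton iteration that doubles the
   number of correct low-order bits, so a handful of steps reach any
   practical word size.  A standalone 64-bit model for odd X:  */
#if 0
static unsigned long long
invert_mod2_64_sketch (unsigned long long x)
{
  unsigned long long y = x;	/* correct to 3 bits */
  y = y * (2 - x * y);		/* 6 bits */
  y = y * (2 - x * y);		/* 12 bits */
  y = y * (2 - x * y);		/* 24 bits */
  y = y * (2 - x * y);		/* 48 bits */
  y = y * (2 - x * y);		/* >= 64 bits: x * y == 1 (mod 2^64) */
  return y;
}
#endif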

/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
   flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.

   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */

static rtx
expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
			     rtx op1, rtx target, int unsignedp)
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
		      GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
  tem = expand_and (mode, tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
		     adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
		      GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
  tem = expand_and (mode, tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
			  target);

  return target;
}

/* Subroutine of expmed_mult_highpart.  Return the MODE high part of OP.  */

static rtx
extract_high_half (enum machine_mode mode, rtx op)
{
  enum machine_mode wider_mode;

  if (mode == word_mode)
    return gen_highpart (mode, op);

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  op = expand_shift (RSHIFT_EXPR, wider_mode, op,
		     GET_MODE_BITSIZE (mode), 0, 1);
  return convert_modes (mode, wider_mode, op, 0);
}

/* Like expmed_mult_highpart, but only consider using a multiplication
   optab.  OP1 is an rtx for the constant operand.  */

static rtx
expmed_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
			    rtx target, int unsignedp, int max_cost)
{
  rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
  enum machine_mode wider_mode;
  optab moptab;
  rtx tem;
  int size;
  bool speed = optimize_insn_for_speed_p ();

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  size = GET_MODE_BITSIZE (mode);

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost (speed, mode) < max_cost)
    {
      moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	return tem;
    }

  /* Secondly, same as above, but use sign flavor opposite of unsignedp.
     Need to adjust the result after the multiplication.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost (speed, mode)
	  + 2 * shift_cost (speed, mode, size-1)
	  + 4 * add_cost (speed, mode) < max_cost))
    {
      moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	/* We used the wrong signedness.  Adjust the result.  */
	return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					    tem, unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
      && mul_widen_cost (speed, wider_mode) < max_cost)
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
			  unsignedp, OPTAB_WIDEN);
      if (tem)
	return extract_high_half (mode, tem);
    }

  /* Try widening the mode and perform a non-widening multiplication.  */
  if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_cost (speed, wider_mode) + shift_cost (speed, mode, size-1)
	  < max_cost))
    {
      rtx insns, wop0, wop1;

      /* We need to widen the operands, for example to ensure the
	 constant multiplier is correctly sign or zero extended.
	 Use a sequence to clean-up any instructions emitted by
	 the conversions if things don't work out.  */
      start_sequence ();
      wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
      wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
      tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
			  unsignedp, OPTAB_WIDEN);
      insns = get_insns ();
      end_sequence ();

      if (tem)
	{
	  emit_insn (insns);
	  return extract_high_half (mode, tem);
	}
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost (speed, wider_mode)
	  + 2 * shift_cost (speed, mode, size-1)
	  + 4 * add_cost (speed, mode) < max_cost))
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
			  NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
	{
	  tem = extract_high_half (mode, tem);
	  /* We used the wrong signedness.  Adjust the result.  */
	  return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					      target, unsignedp);
	}
    }

  return 0;
}

/* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
   putting the high half of the result in TARGET if that is convenient,
   and return where the result is.  If the operation cannot be performed,
   return 0.

   MODE is the mode of operation and result.

   UNSIGNEDP nonzero means unsigned multiply.

   MAX_COST is the total allowed cost for the expanded RTL.  */

static rtx
expmed_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
		      rtx target, int unsignedp, int max_cost)
{
  enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
  unsigned HOST_WIDE_INT cnst1;
  int extra_cost;
  bool sign_adjust = false;
  enum mult_variant variant;
  struct algorithm alg;
  rtx tem;
  bool speed = optimize_insn_for_speed_p ();

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
  /* We can't support modes wider than HOST_BITS_PER_INT.  */
  gcc_assert (HWI_COMPUTABLE_MODE_P (mode));

  cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);

  /* We can't optimize modes wider than BITS_PER_WORD.
     ??? We might be able to perform double-word arithmetic if
     mode == word_mode, however all the cost calculations in
     synth_mult etc. assume single-word operations.  */
  if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
    return expmed_mult_highpart_optab (mode, op0, op1, target,
				       unsignedp, max_cost);

  extra_cost = shift_cost (speed, mode, GET_MODE_BITSIZE (mode) - 1);

  /* Check whether we try to multiply by a negative constant.  */
  if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
    {
      sign_adjust = true;
      extra_cost += add_cost (speed, mode);
    }

  /* See whether shift/add multiplication is cheap enough.  */
  if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
			   max_cost - extra_cost))
    {
      /* See whether the specialized multiplication optabs are
	 cheaper than the shift/add version.  */
      tem = expmed_mult_highpart_optab (mode, op0, op1, target, unsignedp,
					alg.cost.cost + extra_cost);
      if (tem)
	return tem;

      tem = convert_to_mode (wider_mode, op0, unsignedp);
      tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
      tem = extract_high_half (mode, tem);

      /* Adjust result for signedness.  */
      if (sign_adjust)
	tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);

      return tem;
    }
  return expmed_mult_highpart_optab (mode, op0, op1, target,
				     unsignedp, max_cost);
}
/* Expand signed modulus of OP0 by a power of two D in mode MODE.  */

static rtx
expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  unsigned HOST_WIDE_INT masklow, maskhigh;
  rtx result, temp, shift, label;
  int logd;

  logd = floor_log2 (d);
  result = gen_reg_rtx (mode);

  /* Avoid conditional branches when they're expensive.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
      && optimize_insn_for_speed_p ())
    {
      rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
                                      mode, 0, -1);
      if (signmask)
        {
          signmask = force_reg (mode, signmask);
          masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
          shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);

          /* Use the rtx_cost of a LSHIFTRT instruction to determine
             which instruction sequence to use.  If logical right shifts
             are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
             use a LSHIFTRT, 1 ADD, 1 SUB and an AND.  */

          temp = gen_rtx_LSHIFTRT (mode, result, shift);
          if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
              || (set_src_cost (temp, optimize_insn_for_speed_p ())
                  > COSTS_N_INSNS (2)))
            {
              temp = expand_binop (mode, xor_optab, op0, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, sub_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, xor_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, sub_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
            }
          else
            {
              signmask = expand_binop (mode, lshr_optab, signmask, shift,
                                       NULL_RTX, 1, OPTAB_LIB_WIDEN);
              signmask = force_reg (mode, signmask);

              temp = expand_binop (mode, add_optab, op0, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, sub_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
            }
          if (temp != result)
            emit_move_insn (result, temp);
          return result;
        }
    }

  /* Mask contains the mode's signbit and the significant bits of the
     modulus.  By including the signbit in the operation, many targets
     can avoid an explicit compare operation in the following comparison
     against zero.  */

  masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
  if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
      maskhigh = -1;
    }
  else
    maskhigh = (HOST_WIDE_INT) -1
                 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);

  temp = expand_binop (mode, and_optab, op0,
                       immed_double_const (masklow, maskhigh, mode),
                       result, 1, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  label = gen_label_rtx ();
  do_cmp_and_jump (result, const0_rtx, GE, mode, label);

  temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
                       0, OPTAB_LIB_WIDEN);
  masklow = (HOST_WIDE_INT) -1 << logd;
  maskhigh = -1;
  temp = expand_binop (mode, ior_optab, temp,
                       immed_double_const (masklow, maskhigh, mode),
                       result, 1, OPTAB_LIB_WIDEN);
  temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
                       0, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  emit_label (label);
  return result;
}
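/* A minimal sketch (plain C, not compiled in; the name is made up) of the
   branch-free sequence built above, for an assumed 32-bit int and modulus
   2**k: the 2-XOR/2-SUB/1-AND variant.  "sign" plays the role of the
   signmask that emit_store_flag produces (0 or -1); the arithmetic right
   shift of a negative value is assumed to be sign-filling.  */
#if 0
static int
smod_pow2_sketch (int x, int k)
{
  int mask = (1 << k) - 1;      /* masklow above */
  int sign = x >> 31;           /* 0 for x >= 0, -1 for x < 0 */

  /* Conditionally negate, reduce the magnitude, then negate back.  */
  return ((((x ^ sign) - sign) & mask) ^ sign) - sign;
}
#endif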
/* Expand signed division of OP0 by a power of two D in mode MODE.
   This routine is only called for positive values of D.  */

static rtx
expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx temp, label;
  int logd;

  logd = floor_log2 (d);

  if (d == 2
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 1)
    {
      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
    }

#ifdef HAVE_conditional_move
  if (BRANCH_COST (optimize_insn_for_speed_p (), false)
      >= 2)
    {
      rtx temp2;

      /* ??? emit_conditional_move forces a stack adjustment via
         compare_from_rtx so, if the sequence is discarded, it will
         be lost.  Do it now instead.  */
      do_pending_stack_adjust ();

      start_sequence ();
      temp2 = copy_to_mode_reg (mode, op0);
      temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
                           NULL_RTX, 0, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);

      /* Construct "temp2 = (temp2 < 0) ? temp : temp2".  */
      temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
                                     mode, temp, temp2, mode, 0);
      if (temp2)
        {
          rtx seq = get_insns ();
          end_sequence ();
          emit_insn (seq);
          return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
        }
      end_sequence ();
    }
#endif

  if (BRANCH_COST (optimize_insn_for_speed_p (),
                   false) >= 2)
    {
      int ushift = GET_MODE_BITSIZE (mode) - logd;

      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
      if (shift_cost (optimize_insn_for_speed_p (), mode, ushift)
          > COSTS_N_INSNS (1))
        temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
                             NULL_RTX, 0, OPTAB_LIB_WIDEN);
      else
        temp = expand_shift (RSHIFT_EXPR, mode, temp,
                             ushift, NULL_RTX, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
    }

  label = gen_label_rtx ();
  temp = copy_to_mode_reg (mode, op0);
  do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
  expand_inc (temp, GEN_INT (d - 1));
  emit_label (label);
  return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
}
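/* A minimal sketch (plain C, not compiled in; the name is made up) of the
   branch-free path above for an assumed 32-bit int: bias a negative
   dividend by d - 1 so that the arithmetic shift rounds toward zero
   rather than toward minus infinity.  */
#if 0
static int
sdiv_pow2_sketch (int x, int k)
{
  int d = 1 << k;
  int sign = x >> 31;           /* 0 or -1, as emit_store_flag yields */
  int bias = sign & (d - 1);    /* the AND variant of the code above */
  return (x + bias) >> k;       /* truncating division by 2**k */
}
#endif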
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   left for later.  */
/* ??? For modulo, we don't actually need the highpart of the first product,
   the low part will do nicely.  And for small divisors, the second multiply
   can also be a low-part only multiply or even be completely left out.
   E.g. to calculate the remainder of a division by 3 with a 32 bit
   multiply, multiply with 0x55555556 and extract the upper two bits;
   the result is exact for inputs up to 0x1fffffff.
   The input range can be reduced by using cross-sum rules.
   For odd divisors >= 3, the following table gives right shift counts
   so that if a number is shifted by an integer multiple of the given
   amount, the remainder stays the same:
   2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
   14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
   0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
   20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
   0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12.

   Cross-sum rules for even numbers can be derived by leaving as many bits
   to the right alone as the divisor has zeros to the right.
   E.g. if x is an unsigned 32 bit number:
   (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
   */
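/* A small standalone demo (not compiled in; the name is made up) of the
   low-part remark above, for an assumed 32-bit unsigned type: since
   0x55555556 / 2**32 is just above 1/3, the low 32 bits of
   x * 0x55555556 approximate the fractional part of x / 3, and their top
   two bits spell out x mod 3 directly -- exact for x up to 0x1fffffff,
   as the comment states.  */
#if 0
static unsigned int
umod3_demo (unsigned int x)
{
  unsigned int frac = x * 0x55555556u;  /* low part of the product */
  return frac >> 30;                    /* 0, 1 or 2 == x mod 3 */
}
#endif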
rtx
expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
               rtx op0, rtx op1, rtx target, int unsignedp)
{
  enum machine_mode compute_mode;
  rtx tquotient;
  rtx quotient = 0, remainder = 0;
  rtx last;
  int size;
  rtx insn;
  optab optab1, optab2;
  int op1_is_constant, op1_is_pow2 = 0;
  int max_cost, extra_cost;
  static HOST_WIDE_INT last_div_const = 0;
  bool speed = optimize_insn_for_speed_p ();

  op1_is_constant = CONST_INT_P (op1);
  if (op1_is_constant)
    {
      unsigned HOST_WIDE_INT ext_op1 = UINTVAL (op1);
      if (unsignedp)
        ext_op1 &= GET_MODE_MASK (mode);
      op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
                     || (! unsignedp
                         && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
    }
  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding mode.
     For some special operands this code emits all RTL for the desired
     operation, for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
     by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expmed_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */
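#if 0
  /* A worked sketch (not compiled in; the name is made up) of the
     fixed-point strategy described above, for an assumed 32-bit unsigned
     division by 7.  choose_multiplier picks
     m = ceil(2**35 / 7) = 2**32 + 0x24924925, which needs 33 bits, so the
     expander emits the add/shift fixup seen in the mh != 0 path of the
     TRUNC_DIV code below.  */
  static unsigned int
  udiv7_sketch (unsigned int x)
  {
    unsigned int ml = 0x24924925u;  /* low 32 bits of the multiplier */
    unsigned int t1 = (unsigned int) (((unsigned long long) x * ml) >> 32);
    unsigned int t2 = (x - t1) >> 1;    /* overflow-free average trick */
    return (t1 + t2) >> 2;              /* == x / 7 */
  }
#endif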
  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;

  /* When dividing by -1, we could get an overflow.
     negv_optab can handle overflows.  */
  if (! unsignedp && op1 == constm1_rtx)
    {
      if (rem_flag)
        return const0_rtx;
      return expand_unop (mode,
                          flag_trapv && GET_MODE_CLASS (mode) == MODE_INT
                          ? negv_optab : neg_optab, op0, target, 0);
    }

  if (target
      /* Don't use the function value register as a target
         since we have to read it as well as write it,
         and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
          /* Don't clobber an operand while doing a multi-step calculation.  */
          || ((rem_flag || op1_is_constant)
              && (reg_mentioned_p (target, op0)
                  || (MEM_P (op0) && MEM_P (target))))
          || reg_mentioned_p (target, op1)
          || (MEM_P (op1) && MEM_P (target))))
    target = 0;
  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions that modxx3 insns do, and that these insns don't normally
     fail.  If these assumptions are not correct, we may generate less
     efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expmed_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */
  optab1 = ((op1_is_pow2 && op1 != const0_rtx)
            ? (unsignedp ? lshr_optab : ashr_optab)
            : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = ((op1_is_pow2 && op1 != const0_rtx)
            ? optab1
            : (unsignedp ? udivmod_optab : sdivmod_optab));

  for (compute_mode = mode; compute_mode != VOIDmode;
       compute_mode = GET_MODE_WIDER_MODE (compute_mode))
    if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
        || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    for (compute_mode = mode; compute_mode != VOIDmode;
         compute_mode = GET_MODE_WIDER_MODE (compute_mode))
      if (optab_libfunc (optab1, compute_mode)
          || optab_libfunc (optab2, compute_mode))
        break;

  /* If we still couldn't find a mode, use MODE, but expand_binop will
     probably die.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;
  if (target && GET_MODE (target) == compute_mode)
    tquotient = target;
  else
    tquotient = gen_reg_rtx (compute_mode);

  size = GET_MODE_BITSIZE (compute_mode);
#if 0
  /* It should be possible to restrict the precision to GET_MODE_BITSIZE
     (mode), and thereby get better code when OP1 is a constant.  Do that
     later.  It will require going over all usages of SIZE below.  */
  size = GET_MODE_BITSIZE (mode);
#endif

  /* Only deduct something for a REM if the last divide done was
     for a different constant.  Then set the constant of the last
     divide.  */
  max_cost = (unsignedp
              ? udiv_cost (speed, compute_mode)
              : sdiv_cost (speed, compute_mode));
  if (rem_flag && ! (last_div_const != 0 && op1_is_constant
                     && INTVAL (op1) == last_div_const))
    max_cost -= (mul_cost (speed, compute_mode)
                 + add_cost (speed, compute_mode));

  last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
  /* Now convert to the best mode to use.  */
  if (compute_mode != mode)
    {
      op0 = convert_modes (compute_mode, mode, op0, unsignedp);
      op1 = convert_modes (compute_mode, mode, op1, unsignedp);

      /* convert_modes may have placed op1 into a register, so we
         must recompute the following.  */
      op1_is_constant = CONST_INT_P (op1);
      op1_is_pow2 = (op1_is_constant
                     && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                          || (! unsignedp
                              && EXACT_POWER_OF_2_OR_ZERO_P (-UINTVAL (op1))))));
    }

  /* If one of the operands is a volatile MEM, copy it into a register.  */

  if (MEM_P (op0) && MEM_VOLATILE_P (op0))
    op0 = force_reg (compute_mode, op0);
  if (MEM_P (op1) && MEM_VOLATILE_P (op1))
    op1 = force_reg (compute_mode, op1);

  /* If we need the remainder or if OP1 is constant, we need to
     put OP0 in a register in case it has any queued subexpressions.  */
  if (rem_flag || op1_is_constant)
    op0 = force_reg (compute_mode, op0);

  last = get_last_insn ();

  /* Promote floor rounding to trunc rounding for unsigned operations.  */
  if (unsignedp)
    {
      if (code == FLOOR_DIV_EXPR)
        code = TRUNC_DIV_EXPR;
      if (code == FLOOR_MOD_EXPR)
        code = TRUNC_MOD_EXPR;
      if (code == EXACT_DIV_EXPR && op1_is_pow2)
        code = TRUNC_DIV_EXPR;
    }

  if (op1 != const0_rtx)
    switch (code)
      {
      case TRUNC_MOD_EXPR:
      case TRUNC_DIV_EXPR:
        if (op1_is_constant)
          {
            if (unsignedp)
              {
                unsigned HOST_WIDE_INT mh, ml;
                int pre_shift, post_shift;
                int dummy;
                unsigned HOST_WIDE_INT d = (INTVAL (op1)
                                            & GET_MODE_MASK (compute_mode));

                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        remainder
                          = expand_binop (compute_mode, and_optab, op0,
                                          GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
                                          remainder, 1, OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                             pre_shift, tquotient, 1);
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
                      {
                        /* Most significant bit of divisor is set; emit an scc
                           insn.  */
                        quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
                                                          compute_mode, 1, 1);
                      }
                    else
                      {
                        /* Find a suitable multiplier and right shift count
                           instead of multiplying with D.  */

                        mh = choose_multiplier (d, size, size,
                                                &ml, &post_shift, &dummy);

                        /* If the suggested multiplier is more than SIZE bits,
                           we can do better for even divisors, using an
                           initial right shift.  */
                        if (mh != 0 && (d & 1) == 0)
                          {
                            pre_shift = floor_log2 (d & -d);
                            mh = choose_multiplier (d >> pre_shift, size,
                                                    size - pre_shift,
                                                    &ml, &post_shift, &dummy);
                            gcc_assert (!mh);
                          }
                        else
                          pre_shift = 0;

                        if (mh != 0)
                          {
                            rtx t1, t2, t3, t4;

                            if (post_shift - 1 >= BITS_PER_WORD)
                              goto fail1;

                            extra_cost
                              = (shift_cost (speed, compute_mode, post_shift - 1)
                                 + shift_cost (speed, compute_mode, 1)
                                 + 2 * add_cost (speed, compute_mode));
                            t1 = expmed_mult_highpart (compute_mode, op0,
                                                       GEN_INT (ml),
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
                            if (t1 == 0)
                              goto fail1;
                            t2 = force_operand (gen_rtx_MINUS (compute_mode,
                                                               op0, t1),
                                                NULL_RTX);
                            t3 = expand_shift (RSHIFT_EXPR, compute_mode,
                                               t2, 1, NULL_RTX, 1);
                            t4 = force_operand (gen_rtx_PLUS (compute_mode,
                                                              t1, t3),
                                                NULL_RTX);
                            quotient = expand_shift
                              (RSHIFT_EXPR, compute_mode, t4,
                               post_shift - 1, tquotient, 1);
                          }
                        else
                          {
                            rtx t1, t2;

                            if (pre_shift >= BITS_PER_WORD
                                || post_shift >= BITS_PER_WORD)
                              goto fail1;

                            t1 = expand_shift
                              (RSHIFT_EXPR, compute_mode, op0,
                               pre_shift, NULL_RTX, 1);
                            extra_cost
                              = (shift_cost (speed, compute_mode, pre_shift)
                                 + shift_cost (speed, compute_mode, post_shift));
                            t2 = expmed_mult_highpart (compute_mode, t1,
                                                       GEN_INT (ml),
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
                            if (t2 == 0)
                              goto fail1;
                            quotient = expand_shift
                              (RSHIFT_EXPR, compute_mode, t2,
                               post_shift, tquotient, 1);
                          }
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last)
                  set_dst_reg_note (insn, REG_EQUAL,
                                    gen_rtx_UDIV (compute_mode, op0, op1),
                                    quotient);
              }
            else                /* TRUNC_DIV, signed */
              {
                unsigned HOST_WIDE_INT ml;
                int lgup, post_shift;
                rtx mlr;
                HOST_WIDE_INT d = INTVAL (op1);
                unsigned HOST_WIDE_INT abs_d;

                /* Since d might be INT_MIN, we have to cast to
                   unsigned HOST_WIDE_INT before negating to avoid
                   undefined signed overflow.  */
                abs_d = (d >= 0
                         ? (unsigned HOST_WIDE_INT) d
                         : - (unsigned HOST_WIDE_INT) d);

                /* n rem d = n rem -d */
                if (rem_flag && d < 0)
                  {
                    d = abs_d;
                    op1 = gen_int_mode (abs_d, compute_mode);
                  }

                if (d == 1)
                  quotient = op0;
                else if (d == -1)
                  quotient = expand_unop (compute_mode, neg_optab, op0,
                                          tquotient, 0);
                else if (HOST_BITS_PER_WIDE_INT >= size
                         && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
                  {
                    /* This case is not handled correctly below.  */
                    quotient = emit_store_flag (tquotient, EQ, op0, op1,
                                                compute_mode, 1, 1);
                    if (quotient == 0)
                      goto fail1;
                  }
                else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
                         && (rem_flag
                             ? smod_pow2_cheap (speed, compute_mode)
                             : sdiv_pow2_cheap (speed, compute_mode))
                         /* We assume that cheap metric is true if the
                            optab has an expander for this mode.  */
                         && ((optab_handler ((rem_flag ? smod_optab
                                              : sdiv_optab),
                                             compute_mode)
                              != CODE_FOR_nothing)
                             || (optab_handler (sdivmod_optab,
                                                compute_mode)
                                 != CODE_FOR_nothing)))
                  ;
                else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
                  {
                    if (rem_flag)
                      {
                        remainder = expand_smod_pow2 (compute_mode, op0, d);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }

                    if (sdiv_pow2_cheap (speed, compute_mode)
                        && ((optab_handler (sdiv_optab, compute_mode)
                             != CODE_FOR_nothing)
                            || (optab_handler (sdivmod_optab, compute_mode)
                                != CODE_FOR_nothing)))
                      quotient = expand_divmod (0, TRUNC_DIV_EXPR,
                                                compute_mode, op0,
                                                gen_int_mode (abs_d,
                                                              compute_mode),
                                                NULL_RTX, 0);
                    else
                      quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);

                    /* We have computed OP0 / abs(OP1).  If OP1 is negative,
                       negate the quotient.  */
                    if (d < 0)
                      {
                        insn = get_last_insn ();
                        if (insn != last
                            && abs_d < ((unsigned HOST_WIDE_INT) 1
                                        << (HOST_BITS_PER_WIDE_INT - 1)))
                          set_dst_reg_note (insn, REG_EQUAL,
                                            gen_rtx_DIV (compute_mode, op0,
                                                         gen_int_mode
                                                           (abs_d,
                                                            compute_mode)),
                                            quotient);

                        quotient = expand_unop (compute_mode, neg_optab,
                                                quotient, quotient, 0);
                      }
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    choose_multiplier (abs_d, size, size - 1,
                                       &ml, &post_shift, &lgup);
                    if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
                      {
                        rtx t1, t2, t3;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        extra_cost
                          = (shift_cost (speed, compute_mode, post_shift)
                             + shift_cost (speed, compute_mode, size - 1)
                             + add_cost (speed, compute_mode));
                        t1 = expmed_mult_highpart (compute_mode, op0,
                                                   GEN_INT (ml), NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = expand_shift
                          (RSHIFT_EXPR, compute_mode, t1,
                           post_shift, NULL_RTX, 0);
                        t3 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           size - 1, NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t2),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t2, t3),
                                             tquotient);
                      }
                    else
                      {
                        rtx t1, t2, t3, t4;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
                        mlr = gen_int_mode (ml, compute_mode);
                        extra_cost
                          = (shift_cost (speed, compute_mode, post_shift)
                             + shift_cost (speed, compute_mode, size - 1)
                             + 2 * add_cost (speed, compute_mode));
                        t1 = expmed_mult_highpart (compute_mode, op0, mlr,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, op0),
                                            NULL_RTX);
                        t3 = expand_shift
                          (RSHIFT_EXPR, compute_mode, t2,
                           post_shift, NULL_RTX, 0);
                        t4 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           size - 1, NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t4, t3),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t4),
                                             tquotient);
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last)
                  set_dst_reg_note (insn, REG_EQUAL,
                                    gen_rtx_DIV (compute_mode, op0, op1),
                                    quotient);
              }
          }
        break;

      fail1:
        delete_insns_since (last);
        break;
      case FLOOR_DIV_EXPR:
      case FLOOR_MOD_EXPR:
        /* We will come here only for signed operations.  */
        if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
          {
            unsigned HOST_WIDE_INT mh, ml;
            int pre_shift, lgup, post_shift;
            HOST_WIDE_INT d = INTVAL (op1);

            if (d > 0)
              {
                /* We could just as easily deal with negative constants here,
                   but it does not seem worth the trouble for GCC 2.6.  */
                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        remainder = expand_binop (compute_mode, and_optab, op0,
                                                  GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
                                                  remainder, 0, OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift
                      (RSHIFT_EXPR, compute_mode, op0,
                       pre_shift, tquotient, 0);
                  }
                else
                  {
                    rtx t1, t2, t3, t4;

                    mh = choose_multiplier (d, size, size - 1,
                                            &ml, &post_shift, &lgup);
                    gcc_assert (!mh);

                    if (post_shift < BITS_PER_WORD
                        && size - 1 < BITS_PER_WORD)
                      {
                        t1 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           size - 1, NULL_RTX, 0);
                        t2 = expand_binop (compute_mode, xor_optab, op0, t1,
                                           NULL_RTX, 0, OPTAB_WIDEN);
                        extra_cost = (shift_cost (speed, compute_mode, post_shift)
                                      + shift_cost (speed, compute_mode, size - 1)
                                      + 2 * add_cost (speed, compute_mode));
                        t3 = expmed_mult_highpart (compute_mode, t2,
                                                   GEN_INT (ml), NULL_RTX, 1,
                                                   max_cost - extra_cost);
                        if (t3 != 0)
                          {
                            t4 = expand_shift
                              (RSHIFT_EXPR, compute_mode, t3,
                               post_shift, NULL_RTX, 1);
                            quotient = expand_binop (compute_mode, xor_optab,
                                                     t4, t1, tquotient, 0,
                                                     OPTAB_WIDEN);
                          }
                      }
                  }
              }
            else
              {
                rtx nsign, t1, t2, t3, t4;
                t1 = force_operand (gen_rtx_PLUS (compute_mode,
                                                  op0, constm1_rtx), NULL_RTX);
                t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
                                   0, OPTAB_WIDEN);
                nsign = expand_shift
                  (RSHIFT_EXPR, compute_mode, t2,
                   size - 1, NULL_RTX, 0);
                t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
                                    NULL_RTX);
                t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
                                    NULL_RTX, 0);
                if (t4)
                  {
                    rtx t5;
                    t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
                                      NULL_RTX, 0);
                    quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                            t4, t5),
                                              tquotient);
                  }
              }
          }

        if (quotient != 0)
          break;
        delete_insns_since (last);
        /* Try using an instruction that produces both the quotient and
           remainder, using truncation.  We can easily compensate the quotient
           or remainder to get floor rounding, once we have the remainder.
           Notice that we compute also the final remainder value here,
           and return the result right away.  */
        if (target == 0 || GET_MODE (target) != compute_mode)
          target = gen_reg_rtx (compute_mode);

        if (rem_flag)
          {
            remainder
              = REG_P (target) ? target : gen_reg_rtx (compute_mode);
            quotient = gen_reg_rtx (compute_mode);
          }
        else
          {
            quotient
              = REG_P (target) ? target : gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
          }

        if (expand_twoval_binop (sdivmod_optab, op0, op1,
                                 quotient, remainder, 0))
          {
            /* This could be computed with a branch-less sequence.
               Save that for later.  */
            rtx tem;
            rtx label = gen_label_rtx ();
            do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
            tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
            expand_dec (quotient, const1_rtx);
            expand_inc (remainder, op1);
            emit_label (label);
            return gen_lowpart (mode, rem_flag ? remainder : quotient);
          }

        /* No luck with division elimination or divmod.  Have to do it
           by conditionally adjusting op0 *and* the result.  */
        {
          rtx label1, label2, label3, label4, label5;
          rtx adjusted_op0;
          rtx tem;

          quotient = gen_reg_rtx (compute_mode);
          adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
          label1 = gen_label_rtx ();
          label2 = gen_label_rtx ();
          label3 = gen_label_rtx ();
          label4 = gen_label_rtx ();
          label5 = gen_label_rtx ();
          do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (gen_jump (label5));
          emit_barrier ();
          emit_label (label1);
          expand_inc (adjusted_op0, const1_rtx);
          emit_jump_insn (gen_jump (label4));
          emit_barrier ();
          emit_label (label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (gen_jump (label5));
          emit_barrier ();
          emit_label (label3);
          expand_dec (adjusted_op0, const1_rtx);
          emit_label (label4);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          expand_dec (quotient, const1_rtx);
          emit_label (label5);
        }
        break;
      case CEIL_DIV_EXPR:
      case CEIL_MOD_EXPR:
        if (unsignedp)
          {
            if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
              {
                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   floor_log2 (d), tquotient, 1);
                t2 = expand_binop (compute_mode, and_optab, op0,
                                   GEN_INT (d - 1),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (compute_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                      compute_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, t3),
                                            tquotient);
                break;
              }

            /* Try using an instruction that produces both the quotient and
               remainder, using truncation.  We can easily compensate the
               quotient or remainder to get ceiling rounding, once we have the
               remainder.  Notice that we compute also the final remainder
               value here, and return the result right away.  */
            if (target == 0 || GET_MODE (target) != compute_mode)
              target = gen_reg_rtx (compute_mode);

            if (rem_flag)
              {
                remainder = (REG_P (target)
                             ? target : gen_reg_rtx (compute_mode));
                quotient = gen_reg_rtx (compute_mode);
              }
            else
              {
                quotient = (REG_P (target)
                            ? target : gen_reg_rtx (compute_mode));
                remainder = gen_reg_rtx (compute_mode);
              }

            if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
                                     remainder, 1))
              {
                /* This could be computed with a branch-less sequence.
                   Save that for later.  */
                rtx label = gen_label_rtx ();
                do_cmp_and_jump (remainder, const0_rtx, EQ,
                                 compute_mode, label);
                expand_inc (quotient, const1_rtx);
                expand_dec (remainder, op1);
                emit_label (label);
                return gen_lowpart (mode, rem_flag ? remainder : quotient);
              }

            /* No luck with division elimination or divmod.  Have to do it
               by conditionally adjusting op0 *and* the result.  */
            {
              rtx label1, label2;
              rtx adjusted_op0, tem;

              quotient = gen_reg_rtx (compute_mode);
              adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
              label1 = gen_label_rtx ();
              label2 = gen_label_rtx ();
              do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
                               compute_mode, label1);
              emit_move_insn (quotient, const0_rtx);
              emit_jump_insn (gen_jump (label2));
              emit_barrier ();
              emit_label (label1);
              expand_dec (adjusted_op0, const1_rtx);
              tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
                                  quotient, 1, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              expand_inc (quotient, const1_rtx);
              emit_label (label2);
            }
          }
        else /* signed */
          {
            if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                && INTVAL (op1) >= 0)
              {
                /* This is extremely similar to the code for the unsigned case
                   above.  For 2.7 we should merge these variants, but for
                   2.6.1 I don't want to touch the code for unsigned since that
                   gets used in C.  The signed case will only be used by other
                   languages (Ada).  */

                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   floor_log2 (d), tquotient, 0);
                t2 = expand_binop (compute_mode, and_optab, op0,
                                   GEN_INT (d - 1),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (compute_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                      compute_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, t3),
                                            tquotient);
                break;
              }

            /* Try using an instruction that produces both the quotient and
               remainder, using truncation.  We can easily compensate the
               quotient or remainder to get ceiling rounding, once we have the
               remainder.  Notice that we compute also the final remainder
               value here, and return the result right away.  */
            if (target == 0 || GET_MODE (target) != compute_mode)
              target = gen_reg_rtx (compute_mode);

            if (rem_flag)
              {
                remainder = (REG_P (target)
                             ? target : gen_reg_rtx (compute_mode));
                quotient = gen_reg_rtx (compute_mode);
              }
            else
              {
                quotient = (REG_P (target)
                            ? target : gen_reg_rtx (compute_mode));
                remainder = gen_reg_rtx (compute_mode);
              }

            if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
                                     remainder, 0))
              {
                /* This could be computed with a branch-less sequence.
                   Save that for later.  */
                rtx tem;
                rtx label = gen_label_rtx ();
                do_cmp_and_jump (remainder, const0_rtx, EQ,
                                 compute_mode, label);
                tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                    NULL_RTX, 0, OPTAB_WIDEN);
                do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
                expand_inc (quotient, const1_rtx);
                expand_dec (remainder, op1);
                emit_label (label);
                return gen_lowpart (mode, rem_flag ? remainder : quotient);
              }

            /* No luck with division elimination or divmod.  Have to do it
               by conditionally adjusting op0 *and* the result.  */
            {
              rtx label1, label2, label3, label4, label5;
              rtx adjusted_op0;
              rtx tem;

              quotient = gen_reg_rtx (compute_mode);
              adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
              label1 = gen_label_rtx ();
              label2 = gen_label_rtx ();
              label3 = gen_label_rtx ();
              label4 = gen_label_rtx ();
              label5 = gen_label_rtx ();
              do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
                               compute_mode, label1);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label1);
              expand_dec (adjusted_op0, const1_rtx);
              emit_jump_insn (gen_jump (label4));
              emit_barrier ();
              emit_label (label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
                               compute_mode, label3);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label3);
              expand_inc (adjusted_op0, const1_rtx);
              emit_label (label4);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              expand_inc (quotient, const1_rtx);
              emit_label (label5);
            }
          }
        break;
      case EXACT_DIV_EXPR:
        if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
          {
            HOST_WIDE_INT d = INTVAL (op1);
            unsigned HOST_WIDE_INT ml;
            int pre_shift;
            rtx t1;

            pre_shift = floor_log2 (d & -d);
            ml = invert_mod2n (d >> pre_shift, size);
            t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                               pre_shift, NULL_RTX, unsignedp);
            quotient = expand_mult (compute_mode, t1,
                                    gen_int_mode (ml, compute_mode),
                                    NULL_RTX, 1);

            insn = get_last_insn ();
            set_dst_reg_note (insn, REG_EQUAL,
                              gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
                                              compute_mode, op0, op1),
                              quotient);
          }
        break;
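#if 0
        /* A small sketch (not compiled in; the name is made up) of the
           exact-division trick above for an assumed 32-bit type: when x
           is known to be a multiple of an odd d, the quotient is x * d',
           where d' is the inverse of d modulo 2**32 that invert_mod2n
           computes.  Here the inverse is found by Newton iteration,
           doubling the number of correct low bits each step.  */
        static unsigned int
        exact_udiv_sketch (unsigned int x, unsigned int d) /* d odd, d | x */
        {
          unsigned int inv = d;       /* d * d == 1 (mod 8) seeds 3 bits */
          inv *= 2 - d * inv;         /* 6 correct bits */
          inv *= 2 - d * inv;         /* 12 */
          inv *= 2 - d * inv;         /* 24 */
          inv *= 2 - d * inv;         /* 48 >= 32, so inv is exact */
          return x * inv;             /* exact quotient mod 2**32 */
        }
#endif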
      case ROUND_DIV_EXPR:
      case ROUND_MOD_EXPR:
        if (unsignedp)
          {
            rtx tem;
            rtx label;
            label = gen_label_rtx ();
            quotient = gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
            if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
                                     remainder, 1) == 0)
              {
                rtx tem;
                quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
                                         quotient, 1, OPTAB_LIB_WIDEN);
                tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
                remainder = expand_binop (compute_mode, sub_optab, op0, tem,
                                          remainder, 1, OPTAB_LIB_WIDEN);
              }
            tem = plus_constant (compute_mode, op1, -1);
            tem = expand_shift (RSHIFT_EXPR, compute_mode, tem, 1, NULL_RTX, 1);
            do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
            expand_inc (quotient, const1_rtx);
            expand_dec (remainder, op1);
            emit_label (label);
          }
        else
          {
            rtx abs_rem, abs_op1, tem, mask;
            rtx label;
            label = gen_label_rtx ();
            quotient = gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
            if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
                                     remainder, 0) == 0)
              {
                rtx tem;
                quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
                                         quotient, 0, OPTAB_LIB_WIDEN);
                tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
                remainder = expand_binop (compute_mode, sub_optab, op0, tem,
                                          remainder, 0, OPTAB_LIB_WIDEN);
              }
            abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
            abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
            tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
                                1, NULL_RTX, 1);
            do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
            tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
                                 size - 1, NULL_RTX, 0);
            tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_inc (quotient, tem);
            tem = expand_binop (compute_mode, xor_optab, mask, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_dec (remainder, tem);
            emit_label (label);
          }
        return gen_lowpart (mode, rem_flag ? remainder : quotient);

      default:
        gcc_unreachable ();
      }
  if (quotient == 0)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (rem_flag)
        {
          /* Try to produce the remainder without producing the quotient.
             If we seem to have a divmod pattern that does not require widening,
             don't try widening here.  We should really have a WIDEN argument
             to expand_twoval_binop, since what we'd really like to do here is
             1) try a mod insn in compute_mode
             2) try a divmod insn in compute_mode
             3) try a div insn in compute_mode and multiply-subtract to get
                remainder
             4) try the same things with widening allowed.  */
          remainder
            = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                 op0, op1, target,
                                 unsignedp,
                                 ((optab_handler (optab2, compute_mode)
                                   != CODE_FOR_nothing)
                                  ? OPTAB_DIRECT : OPTAB_WIDEN));
          if (remainder == 0)
            {
              /* No luck there.  Can we do remainder and divide at once
                 without a library call?  */
              remainder = gen_reg_rtx (compute_mode);
              if (! expand_twoval_binop ((unsignedp
                                          ? udivmod_optab
                                          : sdivmod_optab),
                                         op0, op1,
                                         NULL_RTX, remainder, unsignedp))
                remainder = 0;
            }

          if (remainder)
            return gen_lowpart (mode, remainder);
        }

      /* Produce the quotient.  Try a quotient insn, but not a library call.
         If we have a divmod in this mode, use it in preference to widening
         the div (for this test we assume it will not fail).  Note that optab2
         is set to the one of the two optabs that the call below will use.  */
      quotient
        = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
                             op0, op1, rem_flag ? NULL_RTX : target,
                             unsignedp,
                             ((optab_handler (optab2, compute_mode)
                               != CODE_FOR_nothing)
                              ? OPTAB_DIRECT : OPTAB_WIDEN));

      if (quotient == 0)
        {
          /* No luck there.  Try a quotient-and-remainder insn,
             keeping the quotient alone.  */
          quotient = gen_reg_rtx (compute_mode);
          if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
                                     op0, op1,
                                     quotient, NULL_RTX, unsignedp))
            {
              quotient = 0;
              /* Still no luck.  If we are not computing the remainder,
                 use a library call for the quotient.  */
              if (! rem_flag)
                quotient = sign_expand_binop (compute_mode,
                                              udiv_optab, sdiv_optab,
                                              op0, op1, target,
                                              unsignedp, OPTAB_LIB_WIDEN);
            }
        }
    }

  if (rem_flag)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (quotient == 0)
        {
          /* No divide instruction either.  Use library for remainder.  */
          remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                         op0, op1, target,
                                         unsignedp, OPTAB_LIB_WIDEN);
          /* No remainder function.  Try a quotient-and-remainder
             function, keeping the remainder.  */
          if (remainder == 0)
            {
              remainder = gen_reg_rtx (compute_mode);
              if (!expand_twoval_binop_libfunc
                  (unsignedp ? udivmod_optab : sdivmod_optab,
                   op0, op1,
                   NULL_RTX, remainder,
                   unsignedp ? UMOD : MOD))
                remainder = NULL_RTX;
            }
        }
      else
        {
          /* We divided.  Now finish doing X - Y * (X / Y).  */
          remainder = expand_mult (compute_mode, quotient, op1,
                                   NULL_RTX, unsignedp);
          remainder = expand_binop (compute_mode, sub_optab, op0,
                                    remainder, target, unsignedp,
                                    OPTAB_LIB_WIDEN);
        }
    }

  return gen_lowpart (mode, rem_flag ? remainder : quotient);
}
/* Return a tree node with data type TYPE, describing the value of X.
   Usually this is a VAR_DECL, if there is no obvious better choice.
   X may be an expression, however we only support those expressions
   generated by loop.c.  */

tree
make_tree (tree type, rtx x)
{
  tree t;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      {
        HOST_WIDE_INT hi = 0;

        if (INTVAL (x) < 0
            && !(TYPE_UNSIGNED (type)
                 && (GET_MODE_BITSIZE (TYPE_MODE (type))
                     < HOST_BITS_PER_WIDE_INT)))
          hi = -1;

        t = build_int_cst_wide (type, INTVAL (x), hi);

        return t;
      }

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
        t = build_int_cst_wide (type,
                                CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
      else
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, x);
          t = build_real (type, d);
        }

      return t;

    case CONST_VECTOR:
      {
        int units = CONST_VECTOR_NUNITS (x);
        tree itype = TREE_TYPE (type);
        tree *elts;
        int i;

        /* Build a tree with vector elements.  */
        elts = XALLOCAVEC (tree, units);
        for (i = units - 1; i >= 0; --i)
          {
            rtx elt = CONST_VECTOR_ELT (x, i);
            elts[i] = make_tree (itype, elt);
          }

        return build_vector (type, elts);
      }

    case PLUS:
      return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case MINUS:
      return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case NEG:
      return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));

    case MULT:
      return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case ASHIFT:
      return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case LSHIFTRT:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (type, XEXP (x, 1))));

    case ASHIFTRT:
      t = signed_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (type, XEXP (x, 1))));

    case DIV:
      if (TREE_CODE (type) != REAL_TYPE)
        t = signed_type_for (type);
      else
        t = type;

      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (t, XEXP (x, 1))));
    case UDIV:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (t, XEXP (x, 1))));

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
                                          GET_CODE (x) == ZERO_EXTEND);
      return fold_convert (type, make_tree (t, XEXP (x, 0)));

    case CONST:
      return make_tree (type, XEXP (x, 0));

    case SYMBOL_REF:
      t = SYMBOL_REF_DECL (x);
      if (t)
        return fold_convert (type, build_fold_addr_expr (t));
      /* else fall through.  */

    default:
      t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);

      /* If TYPE is a POINTER_TYPE, we might need to convert X from
         address mode to pointer mode.  */
      if (POINTER_TYPE_P (type))
        x = convert_memory_address_addr_space
              (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));

      /* Note that we do *not* use SET_DECL_RTL here, because we do not
         want set_decl_rtl to go adjusting REG_ATTRS for this temporary.  */
      t->decl_with_rtl.rtl = x;

      return t;
    }
}
/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */

rtx
expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
{
  rtx tem = 0;

  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
    tem = simplify_binary_operation (AND, mode, op0, op1);
  if (tem == 0)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}
/* Helper function for emit_store_flag.  */
static rtx
emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
             enum machine_mode mode, enum machine_mode compare_mode,
             int unsignedp, rtx x, rtx y, int normalizep,
             enum machine_mode target_mode)
{
  struct expand_operand ops[4];
  rtx op0, last, comparison, subtarget;
  enum machine_mode result_mode = targetm.cstore_mode (icode);

  last = get_last_insn ();
  x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
  y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
  if (!x || !y)
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  if (target_mode == VOIDmode)
    target_mode = result_mode;
  if (!target)
    target = gen_reg_rtx (target_mode);

  comparison = gen_rtx_fmt_ee (code, result_mode, x, y);

  create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], x);
  create_fixed_operand (&ops[3], y);
  if (!maybe_expand_insn (icode, 4, ops))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }
  subtarget = ops[0].value;

  /* If we are converting to a wider mode, first convert to
     TARGET_MODE, then normalize.  This produces better combining
     opportunities on machines that have a SIGN_EXTRACT when we are
     testing a single bit.  This mostly benefits the 68k.

     If STORE_FLAG_VALUE does not have the sign bit set when
     interpreted in MODE, we can do this conversion as unsigned, which
     is usually more efficient.  */
  if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
    {
      convert_move (target, subtarget,
                    val_signbit_known_clear_p (result_mode,
                                               STORE_FLAG_VALUE));
      op0 = target;
      result_mode = target_mode;
    }
  else
    op0 = subtarget;

  /* If we want to keep subexpressions around, don't reuse our last
     target.  */
  if (optimize)
    subtarget = 0;

  /* Now normalize to the proper value in MODE.  Sometimes we don't
     have to do anything.  */
  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
    ;
  /* STORE_FLAG_VALUE might be the most negative number, so write
     the comparison this way to avoid a compiler-time warning.  */
  else if (- normalizep == STORE_FLAG_VALUE)
    op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);

  /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
     it hard to use a value of just the sign bit due to ANSI integer
     constant typing rules.  */
  else if (val_signbit_known_set_p (result_mode, STORE_FLAG_VALUE))
    op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
                        GET_MODE_BITSIZE (result_mode) - 1, subtarget,
                        normalizep == 1);
  else
    {
      gcc_assert (STORE_FLAG_VALUE & 1);

      op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
      if (normalizep == -1)
        op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
    }

  /* If we were converting to a smaller mode, do the conversion now.  */
  if (target_mode != result_mode)
    {
      convert_move (target, op0, 0);
      return target;
    }
  else
    return op0;
}
/* A subroutine of emit_store_flag only including "tricks" that do not
   need a recursive call.  These are kept separate to avoid infinite
   loops.  */

static rtx
emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
                   enum machine_mode mode, int unsignedp, int normalizep,
                   enum machine_mode target_mode)
{
  rtx subtarget;
  enum insn_code icode;
  enum machine_mode compare_mode;
  enum mode_class mclass;
  enum rtx_code scode;
  rtx tem;

  if (unsignedp)
    code = unsigned_condition (code);
  scode = swap_condition (code);

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }

  /* If we are comparing a double-word integer with zero or -1, we can
     convert the comparison into one involving a single word.  */
  if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
      && GET_MODE_CLASS (mode) == MODE_INT
      && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
    {
      if ((code == EQ || code == NE)
          && (op1 == const0_rtx || op1 == constm1_rtx))
        {
          rtx op00, op01;

          /* Do a logical OR or AND of the two words and compare the
             result.  */
          op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
          op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
          tem = expand_binop (word_mode,
                              op1 == const0_rtx ? ior_optab : and_optab,
                              op00, op01, NULL_RTX, unsignedp,
                              OPTAB_DIRECT);

          if (tem != 0)
            tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
                                   unsignedp, normalizep);
        }
      else if ((code == LT || code == GE) && op1 == const0_rtx)
        {
          rtx op0h;

          /* If testing the sign bit, can just test on high word.  */
          op0h = simplify_gen_subreg (word_mode, op0, mode,
                                      subreg_highpart_offset (word_mode,
                                                              mode));
          tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
                                 unsignedp, normalizep);
        }
      else
        tem = NULL_RTX;

      if (tem)
        {
          if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
            return tem;
          if (!target)
            target = gen_reg_rtx (target_mode);

          convert_move (target, tem,
                        !val_signbit_known_set_p (word_mode,
                                                  (normalizep ? normalizep
                                                   : STORE_FLAG_VALUE)));
          return target;
        }
    }
  /* If this is A < 0 or A >= 0, we can do this by taking the ones'
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
          || val_signbit_p (mode, STORE_FLAG_VALUE)))
    {
      subtarget = target;

      if (!target)
        target_mode = mode;

      /* If the result is to be wider than OP0, it is best to convert it
         first.  If it is to be narrower, it is *incorrect* to convert it
         first.  */
      else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
        {
          op0 = convert_modes (target_mode, mode, op0, 0);
          mode = target_mode;
        }

      if (target_mode != mode)
        subtarget = 0;

      if (code == GE)
        op0 = expand_unop (mode, one_cmpl_optab, op0,
                           ((STORE_FLAG_VALUE == 1 || normalizep)
                            ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
        /* If we are supposed to produce a 0/1 value, we want to do
           a logical shift from the sign bit to the low-order bit; for
           a -1/0 value, we do an arithmetic shift.  */
        op0 = expand_shift (RSHIFT_EXPR, mode, op0,
                            GET_MODE_BITSIZE (mode) - 1,
                            subtarget, normalizep != -1);

      if (mode != target_mode)
        op0 = convert_modes (target_mode, mode, op0, 0);

      return op0;
    }

  mclass = GET_MODE_CLASS (mode);
  for (compare_mode = mode; compare_mode != VOIDmode;
       compare_mode = GET_MODE_WIDER_MODE (compare_mode))
    {
      enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
      icode = optab_handler (cstore_optab, optab_mode);
      if (icode != CODE_FOR_nothing)
        {
          do_pending_stack_adjust ();
          tem = emit_cstore (target, icode, code, mode, compare_mode,
                             unsignedp, op0, op1, normalizep, target_mode);
          if (tem)
            return tem;

          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              tem = emit_cstore (target, icode, scode, mode, compare_mode,
                                 unsignedp, op1, op0, normalizep, target_mode);
              if (tem)
                return tem;
            }
          break;
        }
    }

  return 0;
}
/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  Normalize is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */

rtx
emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
                 enum machine_mode mode, int unsignedp, int normalizep)
{
  enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
  enum rtx_code rcode;
  rtx subtarget;
  rtx tem, last, trueval;

  tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
                           target_mode);
  if (tem)
    return tem;

  /* If we reached here, we can't do this with a scc insn, however there
     are some comparisons that can be done in other ways.  Don't do any
     of these cases if branches are very cheap.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
    return 0;

  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
        normalizep = STORE_FLAG_VALUE;

      else if (val_signbit_p (mode, STORE_FLAG_VALUE))
        ;
      else
        return 0;
    }

  last = get_last_insn ();

  /* If optimizing, use different pseudo registers for each insn, instead
     of reusing the same pseudo.  This leads to better CSE, but slows
     down the compiler, since there are more pseudos.  */
  subtarget = (!optimize
               && (target_mode == mode)) ? target : NULL_RTX;
  trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);

  /* For floating-point comparisons, try the reverse comparison or try
     changing the "orderedness" of the comparison.  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      enum rtx_code first_code;
      bool and_them;

      rcode = reverse_condition_maybe_unordered (code);
      if (can_compare_p (rcode, mode, ccp_store_flag)
          && (code == ORDERED || code == UNORDERED
              || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
              || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
        {
          int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
                          || (STORE_FLAG_VALUE == -1 && normalizep == 1));

          /* For the reverse comparison, use either an addition or a XOR.  */
          if (want_add
              && rtx_cost (GEN_INT (normalizep), PLUS, 1,
                           optimize_insn_for_speed_p ()) == 0)
            {
              tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                       STORE_FLAG_VALUE, target_mode);
              if (tem)
                return expand_binop (target_mode, add_optab, tem,
                                     GEN_INT (normalizep),
                                     target, 0, OPTAB_WIDEN);
            }
          else if (!want_add
                   && rtx_cost (trueval, XOR, 1,
                                optimize_insn_for_speed_p ()) == 0)
            {
              tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                       normalizep, target_mode);
              if (tem)
                return expand_binop (target_mode, xor_optab, tem, trueval,
                                     target, INTVAL (trueval) >= 0, OPTAB_WIDEN);
            }
        }

      delete_insns_since (last);

      /* Cannot split ORDERED and UNORDERED, only try the above trick.  */
      if (code == ORDERED || code == UNORDERED)
        return 0;

      and_them = split_comparison (code, mode, &first_code, &code);

      /* If there are no NaNs, the first comparison should always fall through.
         Effectively change the comparison to the other one.  */
      if (!HONOR_NANS (mode))
        {
          gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
          return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
                                    target_mode);
        }

#ifdef HAVE_conditional_move
      /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
         conditional move.  */
      tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
                               normalizep, target_mode);
      if (tem == 0)
        return 0;

      if (and_them)
        tem = emit_conditional_move (target, code, op0, op1, mode,
                                     tem, const0_rtx, GET_MODE (tem), 0);
      else
        tem = emit_conditional_move (target, code, op0, op1, mode,
                                     trueval, tem, GET_MODE (tem), 0);

      if (tem == 0)
        delete_insns_since (last);
      return tem;
#else
      return 0;
#endif
    }

  /* The remaining tricks only apply to integer comparisons.  */

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return 0;

  /* If this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if ((code == EQ || code == NE) && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
                          OPTAB_WIDEN);

      if (tem == 0)
        tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
                            OPTAB_WIDEN);
      if (tem != 0)
        tem = emit_store_flag (target, code, tem, const0_rtx,
                               mode, unsignedp, normalizep);
      if (tem != 0)
        return tem;

      delete_insns_since (last);
    }

  /* For integer comparisons, try the reverse comparison.  However, for
     small X, if we'd have to extend anyway, implementing "X != 0"
     as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0".  */
  rcode = reverse_condition (code);
  if (can_compare_p (rcode, mode, ccp_store_flag)
      && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
            && code == NE
            && GET_MODE_SIZE (mode) < UNITS_PER_WORD
            && op1 == const0_rtx))
    {
      int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
                      || (STORE_FLAG_VALUE == -1 && normalizep == 1));

      /* Again, for the reverse comparison, use either an addition or a XOR.  */
      if (want_add
          && rtx_cost (GEN_INT (normalizep), PLUS, 1,
                       optimize_insn_for_speed_p ()) == 0)
        {
          tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                   STORE_FLAG_VALUE, target_mode);
          if (tem != 0)
            tem = expand_binop (target_mode, add_optab, tem,
                                GEN_INT (normalizep), target, 0, OPTAB_WIDEN);
        }
      else if (!want_add
               && rtx_cost (trueval, XOR, 1,
                            optimize_insn_for_speed_p ()) == 0)
        {
          tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                   normalizep, target_mode);
          if (tem != 0)
            tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
                                INTVAL (trueval) >= 0, OPTAB_WIDEN);
        }

      if (tem != 0)
        return tem;
      delete_insns_since (last);
    }

  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (op1 != const0_rtx
      || (code != EQ && code != NE
          && (BRANCH_COST (optimize_insn_for_speed_p (),
                           false) <= 1 || (code != LE && code != GT))))
    return 0;

  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
                          OPTAB_WIDEN);
      if (tem)
        tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
                            OPTAB_WIDEN);
    }

  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
                          GET_MODE_BITSIZE (mode) - 1,
                          subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
                          OPTAB_WIDEN);
    }
  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
         that converts the operand into a positive number if it is nonzero
         or zero if it was originally zero.  Then, for EQ, we subtract 1 and
         for NE we negate.  This puts the result in the sign bit.  Then we
         normalize with a shift, if needed.

         Two operations that can do the above actions are ABS and FFS, so try
         them.  If that doesn't work, and MODE is smaller than a full word,
         we can use zero-extension to the wider mode (an unsigned conversion)
         as the operation.  */

      /* Note that ABS doesn't yield a positive number for INT_MIN, but
         that is compensated by the subsequent overflow when subtracting
         one.  */

      if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
        tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
        tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
        {
          tem = convert_modes (word_mode, mode, op0, 1);
          mode = word_mode;
        }

      if (tem != 0)
        {
          if (code == EQ)
            tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
                                0, OPTAB_WIDEN);
          else
            tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
        }

      /* If we couldn't do it that way, for NE we can "or" the two's complement
         of the value with itself.  For EQ, we take the one's complement of
         that "or", which is an extra insn, so we only handle EQ if branches
         are expensive.  */

      if (tem == 0
          && (code == NE
              || BRANCH_COST (optimize_insn_for_speed_p (),
                              false) > 1))
        {
          if (rtx_equal_p (subtarget, op0))
            subtarget = 0;

          tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
          tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
                              OPTAB_WIDEN);

          if (tem && code == EQ)
            tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
        }
    }

  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
                        GET_MODE_BITSIZE (mode) - 1,
                        subtarget, normalizep == 1);

  if (tem)
    {
      if (!target)
        ;
      else if (GET_MODE (tem) != target_mode)
        {
          convert_move (target, tem, 0);
          tem = target;
        }
      else if (!subtarget)
        {
          emit_move_insn (target, tem);
          tem = target;
        }
    }
  else
    delete_insns_since (last);

  return tem;
}
5735 emit_store_flag_force (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
5736 enum machine_mode mode
, int unsignedp
, int normalizep
)
5739 rtx trueval
, falseval
;
5741 /* First see if emit_store_flag can do the job. */
5742 tem
= emit_store_flag (target
, code
, op0
, op1
, mode
, unsignedp
, normalizep
);
5747 target
= gen_reg_rtx (word_mode
);
5749 /* If this failed, we have to do this with set/compare/jump/set code.
5750 For foo != 0, if foo is in OP0, just replace it with 1 if nonzero. */
5751 trueval
= normalizep
? GEN_INT (normalizep
) : const1_rtx
;
5753 && GET_MODE_CLASS (mode
) == MODE_INT
5756 && op1
== const0_rtx
)
5758 label
= gen_label_rtx ();
5759 do_compare_rtx_and_jump (target
, const0_rtx
, EQ
, unsignedp
,
5760 mode
, NULL_RTX
, NULL_RTX
, label
, -1);
5761 emit_move_insn (target
, trueval
);
5767 || reg_mentioned_p (target
, op0
) || reg_mentioned_p (target
, op1
))
5768 target
= gen_reg_rtx (GET_MODE (target
));
5770 /* Jump in the right direction if the target cannot implement CODE
5771 but can jump on its reverse condition. */
5772 falseval
= const0_rtx
;
5773 if (! can_compare_p (code
, mode
, ccp_jump
)
5774 && (! FLOAT_MODE_P (mode
)
5775 || code
== ORDERED
|| code
== UNORDERED
5776 || (! HONOR_NANS (mode
) && (code
== LTGT
|| code
== UNEQ
))
5777 || (! HONOR_SNANS (mode
) && (code
== EQ
|| code
== NE
))))
5779 enum rtx_code rcode
;
5780 if (FLOAT_MODE_P (mode
))
5781 rcode
= reverse_condition_maybe_unordered (code
);
5783 rcode
= reverse_condition (code
);
5785 /* Canonicalize to UNORDERED for the libcall. */
5786 if (can_compare_p (rcode
, mode
, ccp_jump
)
5787 || (code
== ORDERED
&& ! can_compare_p (ORDERED
, mode
, ccp_jump
)))
5790 trueval
= const0_rtx
;
5795 emit_move_insn (target
, trueval
);
5796 label
= gen_label_rtx ();
5797 do_compare_rtx_and_jump (op0
, op1
, code
, unsignedp
, mode
, NULL_RTX
,
5798 NULL_RTX
, label
, -1);
5800 emit_move_insn (target
, falseval
);
/* Perform possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE.  This is
   now a thin wrapper around do_compare_rtx_and_jump.  */

static void
do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
                 rtx label)
{
  int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
  do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
                           NULL_RTX, NULL_RTX, label, -1);
}