/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011, 2012
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "insn-config.h"
#include "langhooks.h"
struct target_expmed default_target_expmed;
struct target_expmed *this_target_expmed = &default_target_expmed;
static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int, bool);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
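/* Worked illustration (editor's sketch, not part of the build): the
   (x) & ((x) - 1) trick clears the lowest set bit, so the result is
   zero exactly when at most one bit is set.  */
#if 0
  EXACT_POWER_OF_2_OR_ZERO_P (0)   /* 0 & -1 == 0          -> true  */
  EXACT_POWER_OF_2_OR_ZERO_P (8)   /* 1000 & 0111 == 0     -> true  */
  EXACT_POWER_OF_2_OR_ZERO_P (12)  /* 1100 & 1011 == 1000  -> false */
#endif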
#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv	0
#define CODE_FOR_insv	CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv	0
#define CODE_FOR_extv	CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv	0
#define CODE_FOR_extzv	CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif
struct init_expmed_rtl
{
  struct rtx_def reg;		rtunion reg_fld[2];
  struct rtx_def plus;		rtunion plus_fld1;
  struct rtx_def neg;
  struct rtx_def mult;		rtunion mult_fld1;
  struct rtx_def sdiv;		rtunion sdiv_fld1;
  struct rtx_def udiv;		rtunion udiv_fld1;
  struct rtx_def sdiv_32;	rtunion sdiv_32_fld1;
  struct rtx_def smod_32;	rtunion smod_32_fld1;
  struct rtx_def wide_mult;	rtunion wide_mult_fld1;
  struct rtx_def wide_lshr;	rtunion wide_lshr_fld1;
  struct rtx_def wide_trunc;
  struct rtx_def shift;		rtunion shift_fld1;
  struct rtx_def shift_mult;	rtunion shift_mult_fld1;
  struct rtx_def shift_add;	rtunion shift_add_fld1;
  struct rtx_def shift_sub0;	rtunion shift_sub0_fld1;
  struct rtx_def shift_sub1;	rtunion shift_sub1_fld1;
  struct rtx_def zext;
  struct rtx_def trunc;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
};
static void
init_expmed_one_conv (struct init_expmed_rtl *all, enum machine_mode to_mode,
                      enum machine_mode from_mode, bool speed)
{
  int to_size, from_size;
  rtx which;

  /* We're given no information about the true size of a partial integer,
     only the size of the "full" integer it requires for storage.  For
     comparison purposes here, reduce the bit size by one in that case.  */
  to_size = (GET_MODE_BITSIZE (to_mode)
             - (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT));
  from_size = (GET_MODE_BITSIZE (from_mode)
               - (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT));

  /* Assume cost of zero-extend and sign-extend is the same.  */
  which = (to_size < from_size ? &all->trunc : &all->zext);

  PUT_MODE (&all->reg, from_mode);
  set_convert_cost (to_mode, from_mode, speed, set_src_cost (which, speed));
}
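/* Worked example (editor's addition, hypothetical target): a 20-bit
   partial-integer mode stored in 32 bits reports GET_MODE_BITSIZE of
   32, so TO_SIZE becomes 32 - 1 = 31.  Converting from full 32-bit
   SImode (FROM_SIZE == 32) then satisfies to_size < from_size, and the
   conversion is costed via the TRUNCATE rtx rather than ZERO_EXTEND.  */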
static void
init_expmed_one_mode (struct init_expmed_rtl *all,
                      enum machine_mode mode, int speed)
{
  int m, n, mode_bitsize;
  enum machine_mode mode_from;

  mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);

  PUT_MODE (&all->reg, mode);
  PUT_MODE (&all->plus, mode);
  PUT_MODE (&all->neg, mode);
  PUT_MODE (&all->mult, mode);
  PUT_MODE (&all->sdiv, mode);
  PUT_MODE (&all->udiv, mode);
  PUT_MODE (&all->sdiv_32, mode);
  PUT_MODE (&all->smod_32, mode);
  PUT_MODE (&all->wide_trunc, mode);
  PUT_MODE (&all->shift, mode);
  PUT_MODE (&all->shift_mult, mode);
  PUT_MODE (&all->shift_add, mode);
  PUT_MODE (&all->shift_sub0, mode);
  PUT_MODE (&all->shift_sub1, mode);
  PUT_MODE (&all->zext, mode);
  PUT_MODE (&all->trunc, mode);

  set_add_cost (speed, mode, set_src_cost (&all->plus, speed));
  set_neg_cost (speed, mode, set_src_cost (&all->neg, speed));
  set_mul_cost (speed, mode, set_src_cost (&all->mult, speed));
  set_sdiv_cost (speed, mode, set_src_cost (&all->sdiv, speed));
  set_udiv_cost (speed, mode, set_src_cost (&all->udiv, speed));

  set_sdiv_pow2_cheap (speed, mode, (set_src_cost (&all->sdiv_32, speed)
                                     <= 2 * add_cost (speed, mode)));
  set_smod_pow2_cheap (speed, mode, (set_src_cost (&all->smod_32, speed)
                                     <= 4 * add_cost (speed, mode)));

  set_shift_cost (speed, mode, 0, 0);
  {
    int cost = add_cost (speed, mode);
    set_shiftadd_cost (speed, mode, 0, cost);
    set_shiftsub0_cost (speed, mode, 0, cost);
    set_shiftsub1_cost (speed, mode, 0, cost);
  }

  n = MIN (MAX_BITS_PER_WORD, mode_bitsize);
  for (m = 1; m < n; m++)
    {
      XEXP (&all->shift, 1) = all->cint[m];
      XEXP (&all->shift_mult, 1) = all->pow2[m];

      set_shift_cost (speed, mode, m, set_src_cost (&all->shift, speed));
      set_shiftadd_cost (speed, mode, m, set_src_cost (&all->shift_add, speed));
      set_shiftsub0_cost (speed, mode, m, set_src_cost (&all->shift_sub0, speed));
      set_shiftsub1_cost (speed, mode, m, set_src_cost (&all->shift_sub1, speed));
    }

  if (SCALAR_INT_MODE_P (mode))
    {
      for (mode_from = MIN_MODE_INT; mode_from <= MAX_MODE_INT;
           mode_from = (enum machine_mode)(mode_from + 1))
        init_expmed_one_conv (all, mode, mode_from, speed);
    }
  if (GET_MODE_CLASS (mode) == MODE_INT)
    {
      enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          PUT_MODE (&all->zext, wider_mode);
          PUT_MODE (&all->wide_mult, wider_mode);
          PUT_MODE (&all->wide_lshr, wider_mode);
          XEXP (&all->wide_lshr, 1) = GEN_INT (mode_bitsize);

          set_mul_widen_cost (speed, wider_mode,
                              set_src_cost (&all->wide_mult, speed));
          set_mul_highpart_cost (speed, mode,
                                 set_src_cost (&all->wide_trunc, speed));
        }
    }
}
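/* Illustrative sketch (editor's addition; assumes the cost-query
   helpers declared in expmed.h and hypothetical operands): once the
   tables above are filled in, a multiply by 9 can be costed as
   ((x << 3) + x) and compared against a real multiply.  */
#if 0
  bool prefer_shift_add
    = shiftadd_cost (speed, SImode, 3) < mul_cost (speed, SImode);
#endif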
void
init_expmed (void)
{
  struct init_expmed_rtl all;
  enum machine_mode mode;
  int m, speed;

  memset (&all, 0, sizeof all);
  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      all.pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      all.cint[m] = GEN_INT (m);
    }

  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.sdiv, DIV);
  XEXP (&all.sdiv, 0) = &all.reg;
  XEXP (&all.sdiv, 1) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.sdiv_32, DIV);
  XEXP (&all.sdiv_32, 0) = &all.reg;
  XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? all.cint[32] : GEN_INT (32);

  PUT_CODE (&all.smod_32, MOD);
  XEXP (&all.smod_32, 0) = &all.reg;
  XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub0, MINUS);
  XEXP (&all.shift_sub0, 0) = &all.shift_mult;
  XEXP (&all.shift_sub0, 1) = &all.reg;

  PUT_CODE (&all.shift_sub1, MINUS);
  XEXP (&all.shift_sub1, 0) = &all.reg;
  XEXP (&all.shift_sub1, 1) = &all.shift_mult;

  PUT_CODE (&all.trunc, TRUNCATE);
  XEXP (&all.trunc, 0) = &all.reg;

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      set_zero_cost (speed, set_src_cost (const0_rtx, speed));

      for (mode = MIN_MODE_INT; mode <= MAX_MODE_INT;
           mode = (enum machine_mode)(mode + 1))
        init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_PARTIAL_INT != VOIDmode)
        for (mode = MIN_MODE_PARTIAL_INT; mode <= MAX_MODE_PARTIAL_INT;
             mode = (enum machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_VECTOR_INT != VOIDmode)
        for (mode = MIN_MODE_VECTOR_INT; mode <= MAX_MODE_VECTOR_INT;
             mode = (enum machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);
    }

  if (alg_hash_used_p ())
    {
      struct alg_hash_entry *p = alg_hash_entry_ptr (0);
      memset (p, 0, sizeof (*p) * NUM_ALG_HASH_ENTRIES);
    }
  else
    set_alg_hash_used_p (true);
  default_rtl_profile ();
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
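/* Usage sketch (editor's addition; REG_RTX is a hypothetical pseudo):
   constant operands fold immediately, anything else expands to a NEG
   insn.  */
#if 0
  rtx c = negate_rtx (SImode, GEN_INT (5));  /* folds to (const_int -5) */
  rtx r = negate_rtx (SImode, reg_rtx);      /* emits (neg:SI (reg:SI ...)) */
#endif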
/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data_d *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      gcc_unreachable ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
/* Return true if a bitfield of size BITSIZE at bit number BITNUM within
   a structure of mode STRUCT_MODE represents a lowpart subreg.  The subreg
   offset is then BITNUM / BITS_PER_UNIT.  */

static bool
lowpart_bit_field_p (unsigned HOST_WIDE_INT bitnum,
                     unsigned HOST_WIDE_INT bitsize,
                     enum machine_mode struct_mode)
{
  if (BYTES_BIG_ENDIAN)
    return (bitnum % BITS_PER_UNIT == 0
            && (bitnum + bitsize == GET_MODE_BITSIZE (struct_mode)
                || (bitnum + bitsize) % BITS_PER_WORD == 0));
  else
    return bitnum % BITS_PER_WORD == 0;
}
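/* Worked example (editor's addition): on a little-endian target with
   BITS_PER_WORD == 32, an 8-bit field at bit 32 of a DImode register
   starts at the second word's lsb, so lowpart_bit_field_p (32, 8,
   DImode) is true and the subreg offset is 32 / BITS_PER_UNIT == 4.
   A field at bit 20 is not word-aligned, so the function returns false
   and the shift-and-mask paths are used instead.  */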
/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum,
                   unsigned HOST_WIDE_INT bitregion_start,
                   unsigned HOST_WIDE_INT bitregion_end,
                   enum machine_mode fieldmode,
                   rtx value, bool fallback_p)
{
  rtx op0 = str_rtx;
  rtx orig_value;

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
      int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
      int byte_offset = 0;

      /* Paradoxical subregs need special handling on big endian machines.  */
      if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
        {
          int difference = inner_mode_size - outer_mode_size;

          if (WORDS_BIG_ENDIAN)
            byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            byte_offset += difference % UNITS_PER_WORD;
        }
      else
        byte_offset = SUBREG_BYTE (op0);

      bitnum += byte_offset * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return true;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      struct expand_operand ops[3];
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      enum insn_code icode = optab_handler (vec_set_optab, outermode);
      int pos = bitnum / GET_MODE_BITSIZE (innermode);

      create_fixed_operand (&ops[0], op0);
      create_input_operand (&ops[1], value, innermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
        return true;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.  */
  if (!MEM_P (op0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && ((bitsize == GET_MODE_BITSIZE (GET_MODE (op0)) && bitnum == 0)
          || (bitsize % BITS_PER_WORD == 0 && bitnum % BITS_PER_WORD == 0)))
    {
      /* Use the subreg machinery either to narrow OP0 to the required
         words or to cope with mode punning between equal-sized modes.  */
      rtx sub = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
                                     bitnum / BITS_PER_UNIT);
      if (sub)
        {
          emit_move_insn (sub, value);
          return true;
        }
    }

  /* If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */
  if (MEM_P (op0)
      && bitnum % BITS_PER_UNIT == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (!SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
          || (bitnum % bitsize == 0
              && MEM_ALIGN (op0) % bitsize == 0)))
    {
      op0 = adjust_bitfield_address (op0, fieldmode, bitnum / BITS_PER_UNIT);
      emit_move_insn (op0, value);
      return true;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_bitfield_address (op0, imode, 0);
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);
          }
      }
  }

  /* Storing an lsb-aligned field in a register
     can be done with a movstrict instruction.  */

  if (!MEM_P (op0)
      && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
      rtx arg0 = op0;
      unsigned HOST_WIDE_INT subreg_off;

      if (GET_CODE (arg0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          arg0 = SUBREG_REG (arg0);
        }

      subreg_off = bitnum / BITS_PER_UNIT;
      if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
        {
          arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);

          create_fixed_operand (&ops[0], arg0);
          /* Shrink the source operand to FIELDMODE.  */
          create_convert_operand_to (&ops[1], value, fieldmode, false);
          if (maybe_expand_insn (icode, 2, ops))
            return true;
        }
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force
         is not allowed.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards
                                  ? GET_MODE_SIZE (fieldmode) / UNITS_PER_WORD
                                    - i - 1
                                  : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx value_word = operand_subword_force (value, wordnum, fieldmode);
          unsigned HOST_WIDE_INT new_bitsize =
            MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD);

          /* If the remaining chunk doesn't have full wordsize we have
             to make sure that for big endian machines the higher order
             bits are used.  */
          if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards)
            value_word = simplify_expand_binop (word_mode, lshr_optab,
                                                value_word,
                                                GEN_INT (BITS_PER_WORD
                                                         - new_bitsize),
                                                NULL_RTX, true,
                                                OPTAB_LIB_WIDEN);

          if (!store_bit_field_1 (op0, new_bitsize,
                                  bitnum + bit_offset,
                                  bitregion_start, bitregion_end,
                                  word_mode,
                                  value_word, fallback_p))
            {
              delete_insns_since (last);
              return false;
            }
        }
      return true;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */

  orig_value = value;
  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* If OP0 is a multi-word register, narrow it to the affected word.
     If the region spans two words, defer to store_split_bit_field.  */
  if (!MEM_P (op0) && GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
    {
      op0 = simplify_gen_subreg (word_mode, op0, GET_MODE (op0),
                                 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
      gcc_assert (op0);
      bitnum %= BITS_PER_WORD;
      if (bitnum + bitsize > BITS_PER_WORD)
        {
          if (!fallback_p)
            return false;

          store_split_bit_field (op0, bitsize, bitnum, bitregion_start,
                                 bitregion_end, value);
          return true;
        }
    }

  /* From here on we can assume that the field to be stored in fits
     within a word.  If the destination is a register, it too fits
     in a word.  */

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && bitsize > 0
      && GET_MODE_BITSIZE (op_mode) >= bitsize
      /* Do not use insv for volatile bitfields when
         -fstrict-volatile-bitfields is in effect.  */
      && !(MEM_P (op0) && MEM_VOLATILE_P (op0)
           && flag_strict_volatile_bitfields > 0)
      /* Do not use insv if the bit region is restricted and
         op_mode integer at offset doesn't fit into the
         restricted region.  */
      && !(MEM_P (op0) && bitregion_end
           && bitnum - (bitnum % BITS_PER_UNIT) + GET_MODE_BITSIZE (op_mode)
              > bitregion_end + 1))
    {
      struct expand_operand ops[4];
      unsigned HOST_WIDE_INT bitpos = bitnum;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      bool copy_back = false;

      unsigned int unit = GET_MODE_BITSIZE (op_mode);
      if (MEM_P (xop0))
        {
          /* Get a reference to the first byte of the field.  */
          xop0 = adjust_bitfield_address (xop0, byte_mode,
                                          bitpos / BITS_PER_UNIT);
          bitpos %= BITS_PER_UNIT;
        }
      else
        {
          /* Convert from counting within OP0 to counting in OP_MODE.  */
          if (BYTES_BIG_ENDIAN)
            bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

          /* If xop0 is a register, we need it in OP_MODE
             to make it acceptable to the format of insv.  */
          if (GET_CODE (xop0) == SUBREG)
            /* We can't just change the mode, because this might clobber op0,
               and we will need the original value of op0 if insv fails.  */
            xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0),
                                   SUBREG_BYTE (xop0));
          if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
            xop0 = gen_lowpart_SUBREG (op_mode, xop0);
        }

      /* If the destination is a paradoxical subreg such that we need a
         truncate to the inner mode, perform the insertion on a temporary and
         truncate the result to the original destination.  Note that we can't
         just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
         X) 0)) is (reg:N X).  */
      if (GET_CODE (xop0) == SUBREG
          && REG_P (SUBREG_REG (xop0))
          && (!TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (SUBREG_REG (xop0)),
                                              op_mode)))
        {
          rtx tem = gen_reg_rtx (op_mode);
          emit_move_insn (tem, xop0);
          xop0 = tem;
          copy_back = true;
        }

      /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
         "backwards" from the size of the unit we are inserting into.
         Otherwise, we count bits from the most significant on a
         BYTES/BITS_BIG_ENDIAN machine.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        bitpos = unit - bitsize - bitpos;

      /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != op_mode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (op_mode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (op_mode, value1);
            }
          else if (CONST_INT_P (value))
            value1 = gen_int_mode (INTVAL (value), op_mode);
          else
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            gcc_assert (CONSTANT_P (value));
        }

      create_fixed_operand (&ops[0], xop0);
      create_integer_operand (&ops[1], bitsize);
      create_integer_operand (&ops[2], bitpos);
      create_input_operand (&ops[3], value1, op_mode);
      if (maybe_expand_insn (CODE_FOR_insv, 4, ops))
        {
          if (copy_back)
            convert_move (op0, xop0, true);
          return true;
        }
      delete_insns_since (last);
    }

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (HAVE_insv && MEM_P (op0))
    {
      enum machine_mode bestmode;
      unsigned HOST_WIDE_INT maxbits = MAX_FIXED_MODE_SIZE;

      if (bitregion_end)
        maxbits = bitregion_end - bitregion_start + 1;

      /* Get the mode to use for inserting into this field.  If OP0 is
         BLKmode, get the smallest mode consistent with the alignment. If
         OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
         mode. Otherwise, use the smallest mode containing the field.  */

      if (GET_MODE (op0) == BLKmode
          || GET_MODE_BITSIZE (GET_MODE (op0)) > maxbits
          || (op_mode != MAX_MACHINE_MODE
              && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
        bestmode = get_best_mode (bitsize, bitnum,
                                  bitregion_start, bitregion_end,
                                  MEM_ALIGN (op0),
                                  (op_mode == MAX_MACHINE_MODE
                                   ? VOIDmode : op_mode),
                                  MEM_VOLATILE_P (op0));
      else
        bestmode = GET_MODE (op0);

      if (bestmode != VOIDmode
          && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
          && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
               && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
        {
          rtx last, tempreg, xop0;
          unsigned int unit;
          unsigned HOST_WIDE_INT offset, bitpos;

          last = get_last_insn ();

          /* Adjust address to point to the containing unit of
             that mode.  Compute the offset as a multiple of this unit,
             counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          xop0 = adjust_bitfield_address (op0, bestmode, offset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (xop0);
          if (store_bit_field_1 (tempreg, bitsize, bitpos,
                                 bitregion_start, bitregion_end,
                                 fieldmode, orig_value, false))
            {
              emit_move_insn (xop0, tempreg);
              return true;
            }
          delete_insns_since (last);
        }
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, bitsize, bitnum, bitregion_start,
                         bitregion_end, value);
  return true;
}
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.

   BITREGION_START is bitpos of the first bitfield in this region.
   BITREGION_END is the bitpos of the ending bitfield in this region.
   These two fields are 0, if the C++ memory model does not apply,
   or we are not interested in keeping track of bitfield regions.

   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.  */

void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum,
                 unsigned HOST_WIDE_INT bitregion_start,
                 unsigned HOST_WIDE_INT bitregion_end,
                 enum machine_mode fieldmode,
                 rtx value)
{
  /* Under the C++0x memory model, we must not touch bits outside the
     bit region.  Adjust the address to start at the beginning of the
     bit region.  */
  if (MEM_P (str_rtx) && bitregion_start > 0)
    {
      enum machine_mode bestmode;
      enum machine_mode op_mode;
      unsigned HOST_WIDE_INT offset;

      op_mode = mode_for_extraction (EP_insv, 3);
      if (op_mode == MAX_MACHINE_MODE)
        op_mode = VOIDmode;

      gcc_assert ((bitregion_start % BITS_PER_UNIT) == 0);

      offset = bitregion_start / BITS_PER_UNIT;
      bitnum -= bitregion_start;
      bitregion_end -= bitregion_start;
      bitregion_start = 0;
      bestmode = get_best_mode (bitsize, bitnum,
                                bitregion_start, bitregion_end,
                                MEM_ALIGN (str_rtx),
                                op_mode,
                                MEM_VOLATILE_P (str_rtx));
      str_rtx = adjust_address (str_rtx, bestmode, offset);
    }

  if (!store_bit_field_1 (str_rtx, bitsize, bitnum,
                          bitregion_start, bitregion_end,
                          fieldmode, value, true))
    gcc_unreachable ();
}
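/* Usage sketch (editor's addition; MEM_RTX and VALUE_RTX are
   hypothetical operands).  A true bit-field passes VOIDmode as
   FIELDMODE, so the shift-and-mask fallback handles it: store 6 bits
   at bit offset 10, with no restricted bit region.  */
#if 0
  store_bit_field (mem_rtx, 6, 10,
                   0, 0,               /* no C++ memory-model bit region */
                   VOIDmode, value_rtx);
#endif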
/* Use shifts and boolean operations to store VALUE into a bit field of
   width BITSIZE in OP0, starting at bit BITNUM.  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitnum,
                       unsigned HOST_WIDE_INT bitregion_start,
                       unsigned HOST_WIDE_INT bitregion_end,
                       rtx value)
{
  enum machine_mode mode;
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (MEM_P (op0))
    {
      unsigned HOST_WIDE_INT maxbits = MAX_FIXED_MODE_SIZE;

      if (bitregion_end)
        maxbits = bitregion_end - bitregion_start + 1;

      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;

      if (MEM_VOLATILE_P (op0)
          && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
          && GET_MODE_BITSIZE (GET_MODE (op0)) <= maxbits
          && flag_strict_volatile_bitfields > 0)
        mode = GET_MODE (op0);
      else
        mode = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
                              MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitnum, bitregion_start,
                                 bitregion_end, value);
          return;
        }

      HOST_WIDE_INT bit_offset = bitnum - bitnum % GET_MODE_BITSIZE (mode);
      op0 = adjust_bitfield_address (op0, mode, bit_offset / BITS_PER_UNIT);
      bitnum -= bit_offset;
    }

  mode = GET_MODE (op0);
  gcc_assert (SCALAR_INT_MODE_P (mode));

  /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
     for invalid input, such as f5 from gcc.dg/pr48335-2.c.  */

  if (BYTES_BIG_ENDIAN)
    /* BITNUM is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;

  /* Now BITNUM is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITNUM bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (CONST_INT_P (value))
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitnum, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitnum + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        value = convert_to_mode (mode, value, 1);

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitnum > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              bitnum, NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, temp,
                           mask_rtx (mode, bitnum, bitsize, 1),
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    {
      op0 = copy_rtx (op0);
      emit_move_insn (op0, temp);
    }
}
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos,
                       unsigned HOST_WIDE_INT bitregion_start,
                       unsigned HOST_WIDE_INT bitregion_end,
                       rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && !CONST_INT_P (value))
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* When region of bytes we can touch is restricted, decrease
         UNIT close to the end of the region as needed.  */
      if (bitregion_end
          && unit > BITS_PER_UNIT
          && bitpos + bitsdone - thispos + unit > bitregion_end + 1)
        {
          unit = unit / 2;
          continue;
        }

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          /* Fetch successively less significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            {
              int total_bits = GET_MODE_BITSIZE (GET_MODE (value));
              /* The args are chosen so that the last part includes the
                 lsb.  Give extract_bit_field the value it needs (with
                 endianness compensation) to fetch the piece we want.  */
              part = extract_fixed_bit_field (word_mode, value, thissize,
                                              total_bits - bitsize + bitsdone,
                                              NULL_RTX, 1, false);
            }
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, thissize,
                                            bitsdone, NULL_RTX, 1, false);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          enum machine_mode sub_mode = GET_MODE (SUBREG_REG (op0));
          if (sub_mode != BLKmode && GET_MODE_SIZE (sub_mode) < UNITS_PER_WORD)
            word = word_offset ? const0_rtx : op0;
          else
            word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                          GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (REG_P (op0))
        {
          enum machine_mode op0_mode = GET_MODE (op0);
          if (op0_mode != BLKmode && GET_MODE_SIZE (op0_mode) < UNITS_PER_WORD)
            word = offset ? const0_rtx : op0;
          else
            word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.  If WORD is const0_rtx,
         it is just an out-of-bounds access.  Ignore it.  */
      if (word != const0_rtx)
        store_fixed_bit_field (word, thissize, offset * unit + thispos,
                               bitregion_start, bitregion_end, part);
      bitsdone += thissize;
    }
}
/* A subroutine of extract_bit_field_1 that converts return value X
   to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
   to extract_bit_field.  */

static rtx
convert_extracted_bit_field (rtx x, enum machine_mode mode,
                             enum machine_mode tmode, bool unsignedp)
{
  if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
    return x;

  /* If the x mode is not a scalar integral, first convert to the
     integer mode of that size and then access it as a floating-point
     value via a SUBREG.  */
  if (!SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode smode;

      smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
      x = convert_to_mode (smode, x, unsignedp);
      x = force_reg (smode, x);
      return gen_lowpart (tmode, x);
    }

  return convert_to_mode (tmode, x, unsignedp);
}
/* A subroutine of extract_bit_field, with the same arguments.
   If FALLBACK_P is true, fall back to extract_fixed_bit_field
   if we can find no other means of implementing the operation.
   if FALLBACK_P is false, return NULL instead.  */

static rtx
extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                     unsigned HOST_WIDE_INT bitnum,
                     int unsignedp, bool packedp, rtx target,
                     enum machine_mode mode, enum machine_mode tmode,
                     bool fallback_p)
{
  rtx op0 = str_rtx;
  enum machine_mode int_mode;
  enum machine_mode ext_mode;
  enum machine_mode mode1;

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* See if we can get a better vector mode before extracting.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && GET_MODE_INNER (GET_MODE (op0)) != tmode)
    {
      enum machine_mode new_mode;

      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        new_mode = MIN_MODE_VECTOR_FLOAT;
      else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
        new_mode = MIN_MODE_VECTOR_FRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
        new_mode = MIN_MODE_VECTOR_UFRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
        new_mode = MIN_MODE_VECTOR_ACCUM;
      else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
        new_mode = MIN_MODE_VECTOR_UACCUM;
      else
        new_mode = MIN_MODE_VECTOR_INT;

      for (; new_mode != VOIDmode; new_mode = GET_MODE_WIDER_MODE (new_mode))
        if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
            && targetm.vector_mode_supported_p (new_mode))
          break;
      if (new_mode != VOIDmode)
        op0 = gen_lowpart (new_mode, op0);
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
          == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      struct expand_operand ops[3];
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      enum insn_code icode = optab_handler (vec_extract_optab, outermode);
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);

      create_output_operand (&ops[0], target, innermode);
      create_input_operand (&ops[1], op0, outermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
        {
          target = ops[0].value;
          if (GET_MODE (target) != mode)
            return gen_lowpart (tmode, target);
          return target;
        }
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_bitfield_address (op0, imode, 0);
        else if (imode != BLKmode)
          {
            op0 = gen_lowpart (imode, op0);

            /* If we got a SUBREG, force it into a register since we
               aren't going to be able to do another SUBREG on it.  */
            if (GET_CODE (op0) == SUBREG)
              op0 = force_reg (imode, op0);
          }
        else if (REG_P (op0))
          {
            rtx reg, subreg;
            imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
                                            MODE_INT);
            reg = gen_reg_rtx (imode);
            subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
            emit_move_insn (subreg, op0);
            op0 = reg;
            bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
          }
        else
          {
            rtx mem = assign_stack_temp (GET_MODE (op0),
                                         GET_MODE_SIZE (GET_MODE (op0)));
            emit_move_insn (mem, op0);
            op0 = adjust_bitfield_address (mem, BLKmode, 0);
          }
      }
  }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If the bitfield is volatile, we need to make sure the access
     remains on a type-aligned boundary.  */
  if (GET_CODE (op0) == MEM
      && MEM_VOLATILE_P (op0)
      && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
      && flag_strict_volatile_bitfields > 0)
    goto no_subreg_mode_swap;

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = mode;
  if (SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode try_mode = mode_for_size (bitsize,
                                                  GET_MODE_CLASS (tmode), 0);
      if (try_mode != BLKmode)
        mode1 = try_mode;
    }
  gcc_assert (mode1 != BLKmode);

  /* Extraction of a full MODE1 value can be done with a subreg as long
     as the least significant bit of the value is the least significant
     bit of either OP0 or a word of OP0.  */
  if (!MEM_P (op0)
      && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (mode1)
      && TRULY_NOOP_TRUNCATION_MODES_P (mode1, GET_MODE (op0)))
    {
      rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
                                     bitnum / BITS_PER_UNIT);
      if (sub)
        return convert_extracted_bit_field (sub, mode, tmode, unsignedp);
    }

  /* Extraction of a full MODE1 value can be done with a load as long as
     the field is on a byte boundary and is sufficiently aligned.  */
  if (MEM_P (op0)
      && bitnum % BITS_PER_UNIT == 0
      && bitsize == GET_MODE_BITSIZE (mode1)
      && (!SLOW_UNALIGNED_ACCESS (mode1, MEM_ALIGN (op0))
          || (bitnum % bitsize == 0
              && MEM_ALIGN (op0) % bitsize == 0)))
    {
      op0 = adjust_bitfield_address (op0, mode1, bitnum / BITS_PER_UNIT);
      return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
    }

 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_clobber (target);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field_1 (op0, MIN (BITS_PER_WORD,
                                             bitsize - i * BITS_PER_WORD),
                                   bitnum + bit_offset, 1, false, target_part,
                                   mode, word_mode, fallback_p);

          gcc_assert (target_part);
          if (!result_part)
            {
              delete_insns_since (last);
              return NULL;
            }

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
    }

  /* If OP0 is a multi-word register, narrow it to the affected word.
     If the region spans two words, defer to extract_split_bit_field.  */
  if (!MEM_P (op0) && GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
    {
      op0 = simplify_gen_subreg (word_mode, op0, GET_MODE (op0),
                                 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
      bitnum %= BITS_PER_WORD;
      if (bitnum + bitsize > BITS_PER_WORD)
        {
          if (!fallback_p)
            return NULL_RTX;
          target = extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
          return convert_extracted_bit_field (target, mode, tmode, unsignedp);
        }
    }

  /* From here on we know the desired field is smaller than a word.
     If OP0 is a register, it too fits within a word.  */

  ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
  if (ext_mode != MAX_MACHINE_MODE
      && bitsize > 0
      && GET_MODE_BITSIZE (ext_mode) >= bitsize
      /* Do not use extv/extzv for volatile bitfields when
         -fstrict-volatile-bitfields is in effect.  */
      && !(MEM_P (op0) && MEM_VOLATILE_P (op0)
           && flag_strict_volatile_bitfields > 0)
      /* If op0 is a register, we need it in EXT_MODE to make it
         acceptable to the format of ext(z)v.  */
      && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode))
    {
      struct expand_operand ops[4];
      unsigned HOST_WIDE_INT bitpos = bitnum;
      rtx xop0 = op0;
      rtx xtarget = target;
      rtx xspec_target = target;
      rtx xspec_target_subreg = 0;
      unsigned unit = GET_MODE_BITSIZE (ext_mode);

      /* If op0 is a register, we need it in EXT_MODE to make it
         acceptable to the format of ext(z)v.  */
      if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
        xop0 = gen_lowpart_SUBREG (ext_mode, xop0);
      if (MEM_P (xop0))
        {
          /* Get a reference to the first byte of the field.  */
          xop0 = adjust_bitfield_address (xop0, byte_mode,
                                          bitpos / BITS_PER_UNIT);
          bitpos %= BITS_PER_UNIT;
        }
      else
        {
          /* Convert from counting within OP0 to counting in EXT_MODE.  */
          if (BYTES_BIG_ENDIAN)
            bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
        }

      /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
         "backwards" from the size of the unit we are extracting from.
         Otherwise, we count bits from the most significant on a
         BYTES/BITS_BIG_ENDIAN machine.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        bitpos = unit - bitsize - bitpos;

      if (xtarget == 0)
        xtarget = xspec_target = gen_reg_rtx (tmode);

      if (GET_MODE (xtarget) != ext_mode)
        {
          /* Don't use LHS paradoxical subreg if explicit truncation is needed
             between the mode of the extraction (word_mode) and the target
             mode.  Instead, create a temporary and use convert_move to set
             the target.  */
          if (REG_P (xtarget)
              && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (xtarget), ext_mode))
            {
              xtarget = gen_lowpart (ext_mode, xtarget);
              if (GET_MODE_PRECISION (ext_mode)
                  > GET_MODE_PRECISION (GET_MODE (xspec_target)))
                xspec_target_subreg = xtarget;
            }
          else
            xtarget = gen_reg_rtx (ext_mode);
        }

      create_output_operand (&ops[0], xtarget, ext_mode);
      create_fixed_operand (&ops[1], xop0);
      create_integer_operand (&ops[2], bitsize);
      create_integer_operand (&ops[3], bitpos);
      if (maybe_expand_insn (unsignedp ? CODE_FOR_extzv : CODE_FOR_extv,
                             4, ops))
        {
          xtarget = ops[0].value;
          if (xtarget == xspec_target)
            return xtarget;
          if (xtarget == xspec_target_subreg)
            return xspec_target;
          return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
        }
    }

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
    {
      enum machine_mode bestmode;

      /* Get the mode to use for inserting into this field.  If
         OP0 is BLKmode, get the smallest mode consistent with the
         alignment. If OP0 is a non-BLKmode object that is no
         wider than EXT_MODE, use its mode. Otherwise, use the
         smallest mode containing the field.  */

      if (GET_MODE (op0) == BLKmode
          || (ext_mode != MAX_MACHINE_MODE
              && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
        bestmode = get_best_mode (bitsize, bitnum, 0, 0, MEM_ALIGN (op0),
                                  (ext_mode == MAX_MACHINE_MODE
                                   ? VOIDmode : ext_mode),
                                  MEM_VOLATILE_P (op0));
      else
        bestmode = GET_MODE (op0);

      if (bestmode != VOIDmode
          && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
               && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
        {
          unsigned HOST_WIDE_INT offset, bitpos;

          /* Compute the offset as a multiple of this unit,
             counting in bytes.  */
          unsigned int unit = GET_MODE_BITSIZE (bestmode);
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;

          /* Make sure the register is big enough for the whole field.  */
          if (bitpos + bitsize <= unit)
            {
              rtx last, result, xop0;

              last = get_last_insn ();

              /* Fetch it to a register in that size.  */
              xop0 = adjust_bitfield_address (op0, bestmode, offset);
              xop0 = force_reg (bestmode, xop0);
              result = extract_bit_field_1 (xop0, bitsize, bitpos,
                                            unsignedp, packedp, target,
                                            mode, tmode, false);
              if (result)
                return result;

              delete_insns_since (last);
            }
        }
    }

  if (!fallback_p)
    return NULL;

  /* Find a correspondingly-sized integer field, so we can apply
     shifts and masks to it.  */
  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  /* Should probably push op0 out to memory and then do a load.  */
  gcc_assert (int_mode != BLKmode);

  target = extract_fixed_bit_field (int_mode, op0, bitsize, bitnum,
                                    target, unsignedp, packedp);
  return convert_extracted_bit_field (target, mode, tmode, unsignedp);
}
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   PACKEDP is nonzero if the field has the packed attribute.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, bool packedp,
                   rtx target, enum machine_mode mode, enum machine_mode tmode)
{
  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp, packedp,
                              target, mode, tmode, true);
}
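/* Usage sketch (editor's addition; STRUCT_MEM is a hypothetical MEM):
   fetch an unsigned 6-bit field starting at bit 10 and widen the
   result to SImode.  */
#if 0
  rtx val = extract_bit_field (struct_mem, 6, 10, 1, false,
                               NULL_RTX, SImode, SImode);
#endif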
/* Use shifts and boolean operations to extract a field of BITSIZE bits
   from bit BITNUM of OP0.

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   PACKEDP is true if the field has the packed attribute.

   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
                         unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitnum, rtx target,
                         int unsignedp, bool packedp)
{
  enum machine_mode mode;

  if (MEM_P (op0))
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      if (MEM_VOLATILE_P (op0)
          && flag_strict_volatile_bitfields > 0)
        {
          if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
            mode = GET_MODE (op0);
          else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
            mode = GET_MODE (target);
          else
            mode = tmode;
        }
      else
        mode = get_best_mode (bitsize, bitnum, 0, 0,
                              MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize, bitnum, unsignedp);

      unsigned int total_bits = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT bit_offset = bitnum - bitnum % total_bits;

      /* If we're accessing a volatile MEM, we can't apply BIT_OFFSET
         if it results in a multi-word access where we otherwise wouldn't
         have one.  So, check for that case here.  */
      if (MEM_P (op0)
          && MEM_VOLATILE_P (op0)
          && flag_strict_volatile_bitfields > 0
          && bitnum % BITS_PER_UNIT + bitsize <= total_bits
          && bitnum % GET_MODE_BITSIZE (mode) + bitsize > total_bits)
        {
          if (STRICT_ALIGNMENT)
            {
              static bool informed_about_misalignment = false;

              if (packedp)
                {
                  if (bitsize == total_bits)
                    warning_at (input_location, OPT_fstrict_volatile_bitfields,
                                "multiple accesses to volatile structure"
                                " member because of packed attribute");
                  else
                    warning_at (input_location, OPT_fstrict_volatile_bitfields,
                                "multiple accesses to volatile structure"
                                " bitfield because of packed attribute");

                  return extract_split_bit_field (op0, bitsize, bitnum,
                                                  unsignedp);
                }

              if (bitsize == total_bits)
                warning_at (input_location, OPT_fstrict_volatile_bitfields,
                            "mis-aligned access used for structure member");
              else
                warning_at (input_location, OPT_fstrict_volatile_bitfields,
                            "mis-aligned access used for structure bitfield");

              if (! informed_about_misalignment)
                {
                  informed_about_misalignment = true;
                  inform (input_location,
                          "when a volatile object spans multiple type-sized"
                          " locations, the compiler must choose between using"
                          " a single mis-aligned access to preserve the"
                          " volatility, or using multiple aligned accesses"
                          " to avoid runtime faults; this code may fail at"
                          " runtime if the hardware does not allow this"
                          " access");
                }
            }
          bit_offset = bitnum - bitnum % BITS_PER_UNIT;
        }
      op0 = adjust_bitfield_address (op0, mode, bit_offset / BITS_PER_UNIT);
      bitnum -= bit_offset;
    }

  mode = GET_MODE (op0);
  gcc_assert (SCALAR_INT_MODE_P (mode));

  /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
     for invalid input, such as extract equivalent of f5 from
     gcc.dg/pr48335-2.c.  */

  if (BYTES_BIG_ENDIAN)
    /* BITNUM is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;

  /* Now BITNUM is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitnum)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          /* Maybe propagate the target for the shift.  */
          rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
          if (tmode != mode)
            subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitnum, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitnum + bitsize)
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitnum)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (mode != tmode)
    target = 0;

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitnum))
    {
      int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitnum);
      /* Maybe propagate the target for the shift.  */
      rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       GET_MODE_BITSIZE (mode) - bitsize, target, 0);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
{
  double_int mask;

  mask = double_int::mask (bitsize);
  mask = mask.llshift (bitpos, HOST_BITS_PER_DOUBLE_INT);

  if (complement)
    mask = ~mask;

  return immed_double_int_const (mask, mode);
}
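
/* Editor's illustration (not GCC code; kept under "#if 0"): the
   host-side equivalent of the mask mask_rtx builds, assuming the
   field fits in a 32-bit word.  The bitsize == 32 case is split out
   because a shift by the full width is undefined in C.  */
#if 0
static unsigned int
mask32 (int bitpos, int bitsize, int complement)
{
  /* BITSIZE ones followed by BITPOS zeros.  */
  unsigned int ones = bitsize == 32 ? ~0u : (1u << bitsize) - 1;
  unsigned int mask = ones << bitpos;
  return complement ? ~mask : mask;
}
#endif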
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
{
  double_int val;

  val = double_int::from_uhwi (INTVAL (value)).zext (bitsize);
  val = val.llshift (bitpos, HOST_BITS_PER_DOUBLE_INT);

  return immed_double_int_const (val, mode);
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.  */

static rtx
extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitpos, int unsignedp)
{
  unsigned int unit;
  unsigned int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 extract_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (REG_P (op0))
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* Extract the parts in bit-counting order,
	 whose meaning is determined by BYTES_PER_UNIT.
	 OFFSET is in UNITs, and UNIT is in bits.  */
      part = extract_fixed_bit_field (word_mode, word, thissize,
				      offset * unit + thispos, 0, 1, false);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
	{
	  if (bitsize != bitsdone)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 bitsize - bitsdone, 0, 1);
	}
      else
	{
	  if (bitsdone != thissize)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 bitsdone - thissize, 0, 1);
	}

      if (first)
	result = part;
      else
	/* Combine the parts with bitwise or.  This works
	   because we extracted each part as an unsigned bit field.  */
	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
			       OPTAB_LIB_WIDEN);

      first = 0;
    }

  if (unsignedp)
    /* Unsigned bit field: we are done.  */
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
			 BITS_PER_WORD - bitsize, NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
		       BITS_PER_WORD - bitsize, NULL_RTX, 0);
}
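
/* Editor's illustration (not GCC code; kept under "#if 0"): the same
   combine-with-IOR scheme on the host, extracting an unsigned field
   of BITSIZE bits starting at bit BITPOS from a little-endian pair of
   32-bit words.  It assumes the field really is split, i.e.
   bitpos + bitsize > 32 and 0 < bitpos < 32.  Each part is pulled out
   as an unsigned field and IORed into place, just as the loop above
   does one word at a time.  */
#if 0
static unsigned int
extract_split_32 (unsigned int lo, unsigned int hi,
		  int bitpos, int bitsize)
{
  int lobits = 32 - bitpos;		/* bits supplied by the low word */
  unsigned int part0 = lo >> bitpos;	/* low part, already at the lsb */
  unsigned int part1 = hi & ((1u << (bitsize - lobits)) - 1);
  return part0 | (part1 << lobits);	/* combine with bitwise or */
}
#endif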
/* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
   the bit pattern.  SRC_MODE is the mode of SRC; if this is smaller than
   MODE, fill the upper bits with zeros.  Fail if the layout of either
   mode is unknown (as for CC modes) or if the extraction would involve
   unprofitable mode punning.  Return the value on success, otherwise
   return null.

   This is different from gen_lowpart* in these respects:

     - the returned value must always be considered an rvalue

     - when MODE is wider than SRC_MODE, the extraction involves
       a zero extension

     - when MODE is smaller than SRC_MODE, the extraction involves
       a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).

   In other words, this routine performs a computation, whereas the
   gen_lowpart* routines are conceptually lvalue or rvalue subreg
   operations.  */

rtx
extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
{
  enum machine_mode int_mode, src_int_mode;

  if (mode == src_mode)
    return src;

  if (CONSTANT_P (src))
    {
      /* simplify_gen_subreg can't be used here, as if simplify_subreg
	 fails, it will happily create (subreg (symbol_ref)) or similar
	 invalid SUBREGs.  */
      unsigned int byte = subreg_lowpart_offset (mode, src_mode);
      rtx ret = simplify_subreg (mode, src, src_mode, byte);
      if (ret)
	return ret;

      if (GET_MODE (src) == VOIDmode
	  || !validate_subreg (mode, src_mode, src, byte))
	return NULL_RTX;

      src = force_reg (GET_MODE (src), src);
      return gen_rtx_SUBREG (mode, src, byte);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
    return NULL_RTX;

  if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
      && MODES_TIEABLE_P (mode, src_mode))
    {
      rtx x = gen_lowpart_common (mode, src);
      if (x)
	return x;
    }

  src_int_mode = int_mode_for_mode (src_mode);
  int_mode = int_mode_for_mode (mode);
  if (src_int_mode == BLKmode || int_mode == BLKmode)
    return NULL_RTX;

  if (!MODES_TIEABLE_P (src_int_mode, src_mode))
    return NULL_RTX;
  if (!MODES_TIEABLE_P (int_mode, mode))
    return NULL_RTX;

  src = gen_lowpart (src_int_mode, src);
  src = convert_modes (int_mode, src_int_mode, src, true);
  src = gen_lowpart (mode, src);
  return src;
}
/* Add INC into TARGET.  */

void
expand_inc (rtx target, rtx inc)
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
			    target, inc,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Subtract DEC from TARGET.  */

void
expand_dec (rtx target, rtx dec)
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
			    target, dec,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the rtx for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

static rtx
expand_shift_1 (enum tree_code code, enum machine_mode mode, rtx shifted,
		rtx amount, rtx target, int unsignedp)
{
  rtx op1, temp = 0;
  int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  optab lshift_optab = ashl_optab;
  optab rshift_arith_optab = ashr_optab;
  optab rshift_uns_optab = lshr_optab;
  optab lrotate_optab = rotl_optab;
  optab rrotate_optab = rotr_optab;
  enum machine_mode op1_mode;
  int attempt;
  bool speed = optimize_insn_for_speed_p ();

  op1 = amount;
  op1_mode = GET_MODE (op1);

  /* Determine whether the shift/rotate amount is a vector, or scalar.  If the
     shift amount is a vector, use the vector/vector shift patterns.  */
  if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
    {
      lshift_optab = vashl_optab;
      rshift_arith_optab = vashr_optab;
      rshift_uns_optab = vlshr_optab;
      lrotate_optab = vrotl_optab;
      rrotate_optab = vrotr_optab;
    }

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  if (SHIFT_COUNT_TRUNCATED)
    {
      if (CONST_INT_P (op1)
	  && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
	      (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
	op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
		       % GET_MODE_BITSIZE (mode));
      else if (GET_CODE (op1) == SUBREG
	       && subreg_lowpart_p (op1)
	       && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (op1))))
	op1 = SUBREG_REG (op1);
    }

  if (op1 == const0_rtx)
    return shifted;

  /* Check whether it's cheaper to implement a left shift by a constant
     bit count by a sequence of additions.  */
  if (code == LSHIFT_EXPR
      && CONST_INT_P (op1)
      && INTVAL (op1) > 0
      && INTVAL (op1) < GET_MODE_PRECISION (mode)
      && INTVAL (op1) < MAX_BITS_PER_WORD
      && (shift_cost (speed, mode, INTVAL (op1))
	  > INTVAL (op1) * add_cost (speed, mode))
      && shift_cost (speed, mode, INTVAL (op1)) != MAX_COST)
    {
      int i;
      for (i = 0; i < INTVAL (op1); i++)
	{
	  temp = force_reg (mode, shifted);
	  shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
				  unsignedp, OPTAB_LIB_WIDEN);
	}
      return shifted;
    }

  for (attempt = 0; temp == 0 && attempt < 3; attempt++)
    {
      enum optab_methods methods;

      if (attempt == 0)
	methods = OPTAB_DIRECT;
      else if (attempt == 1)
	methods = OPTAB_WIDEN;
      else
	methods = OPTAB_LIB_WIDEN;

      if (rotate)
	{
	  /* Widening does not work for rotation.  */
	  if (methods == OPTAB_WIDEN)
	    continue;
	  else if (methods == OPTAB_LIB_WIDEN)
	    {
	      /* If we have been unable to open-code this by a rotation,
		 do it as the IOR of two shifts.  I.e., to rotate A
		 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
		 where C is the bitsize of A.

		 It is theoretically possible that the target machine might
		 not be able to perform either shift and hence we would
		 be making two libcalls rather than just the one for the
		 shift (similarly if IOR could not be done).  We will allow
		 this extremely unlikely lossage to avoid complicating the
		 code.  */

	      rtx subtarget = target == shifted ? 0 : target;
	      rtx new_amount, other_amount;
	      rtx temp1;

	      new_amount = op1;
	      if (CONST_INT_P (op1))
		other_amount = GEN_INT (GET_MODE_BITSIZE (mode)
					- INTVAL (op1));
	      else
		other_amount
		  = simplify_gen_binary (MINUS, GET_MODE (op1),
					 GEN_INT (GET_MODE_PRECISION (mode)),
					 op1);

	      shifted = force_reg (mode, shifted);

	      temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
				     mode, shifted, new_amount, 0, 1);
	      temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
				      mode, shifted, other_amount,
				      subtarget, 1);
	      return expand_binop (mode, ior_optab, temp, temp1, target,
				   unsignedp, methods);
	    }

	  temp = expand_binop (mode,
			       left ? lrotate_optab : rrotate_optab,
			       shifted, op1, target, unsignedp, methods);
	}
      else if (unsignedp)
	temp = expand_binop (mode,
			     left ? lshift_optab : rshift_uns_optab,
			     shifted, op1, target, unsignedp, methods);

      /* Do arithmetic shifts.
	 Also, if we are going to widen the operand, we can just as well
	 use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
	  && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
	{
	  enum optab_methods methods1 = methods;

	  /* If trying to widen a log shift to an arithmetic shift,
	     don't accept an arithmetic shift of the same size.  */
	  if (unsignedp)
	    methods1 = OPTAB_MUST_WIDEN;

	  /* Arithmetic shift */

	  temp = expand_binop (mode,
			       left ? lshift_optab : rshift_arith_optab,
			       shifted, op1, target, unsignedp, methods1);
	}

      /* We used to try extzv here for logical right shifts, but that was
	 only useful for one machine, the VAX, and caused poor code
	 generation there for lshrdi3, so the code was deleted and a
	 define_expand for lshrsi3 was added to vax.md.  */
    }

  gcc_assert (temp);
  return temp;
}
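
/* Editor's illustration (not GCC code; kept under "#if 0"): the rotate
   fallback used above when no rotate instruction exists, for a 32-bit
   value.  A rotate left by N becomes (A << N) | ((unsigned) A >> (C - N))
   with C == 32; both shift counts are nonzero here because the
   op1 == const0_rtx case was handled early.  */
#if 0
static unsigned int
rotl32 (unsigned int a, int n)		/* requires 0 < n < 32 */
{
  return (a << n) | (a >> (32 - n));
}
#endif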
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
	      int amount, rtx target, int unsignedp)
{
  return expand_shift_1 (code, mode,
			 shifted, GEN_INT (amount), target, unsignedp);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_variable_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
		       tree amount, rtx target, int unsignedp)
{
  return expand_shift_1 (code, mode,
			 shifted, expand_normal (amount), target, unsignedp);
}
/* Indicates the type of fixup needed after a constant multiplication.
   BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
   the result should be negated, and ADD_VARIANT means that the
   multiplicand should be added to the result.  */
enum mult_variant {basic_variant, negate_variant, add_variant};

static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
			const struct mult_cost *, enum machine_mode mode);
static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
				 struct algorithm *, enum mult_variant *, int);
static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
			      const struct algorithm *, enum mult_variant);
static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
static rtx extract_high_half (enum machine_mode, rtx);
static rtx expmed_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
static rtx expmed_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
				       int, int);
/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.
   MODE is the machine mode of the multiplication.  */

static void
synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
	    const struct mult_cost *cost_limit, enum machine_mode mode)
{
  int m;
  struct algorithm *alg_in, *best_alg;
  struct mult_cost best_cost;
  struct mult_cost new_limit;
  int op_cost, op_latency;
  unsigned HOST_WIDE_INT orig_t = t;
  unsigned HOST_WIDE_INT q;
  int maxm, hash_index;
  bool cache_hit = false;
  enum alg_code cache_alg = alg_zero;
  bool speed = optimize_insn_for_speed_p ();
  enum machine_mode imode;
  struct alg_hash_entry *entry_ptr;

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost.cost = cost_limit->cost + 1;
  alg_out->cost.latency = cost_limit->latency + 1;

  if (cost_limit->cost < 0
      || (cost_limit->cost == 0 && cost_limit->latency <= 0))
    return;

  /* Be prepared for vector modes.  */
  imode = GET_MODE_INNER (mode);
  if (imode == VOIDmode)
    imode = mode;

  maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (imode));

  /* Restrict the bits of "t" to the multiplication's mode.  */
  t &= GET_MODE_MASK (imode);

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost.cost = 0;
      alg_out->cost.latency = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (MULT_COST_LESS (cost_limit, zero_cost (speed)))
	return;
      else
	{
	  alg_out->ops = 1;
	  alg_out->cost.cost = zero_cost (speed);
	  alg_out->cost.latency = zero_cost (speed);
	  alg_out->op[0] = alg_zero;
	  return;
	}
    }

  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = XALLOCA (struct algorithm);
  best_alg = XALLOCA (struct algorithm);
  best_cost = *cost_limit;

  /* Compute the hash index.  */
  hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;

  /* See if we already know what to do for T.  */
  entry_ptr = alg_hash_entry_ptr (hash_index);
  if (entry_ptr->t == t
      && entry_ptr->mode == mode
      && entry_ptr->speed == speed
      && entry_ptr->alg != alg_unknown)
    {
      cache_alg = entry_ptr->alg;

      if (cache_alg == alg_impossible)
	{
	  /* The cache tells us that it's impossible to synthesize
	     multiplication by T within entry_ptr->cost.  */
	  if (!CHEAPER_MULT_COST (&entry_ptr->cost, cost_limit))
	    /* COST_LIMIT is at least as restrictive as the one
	       recorded in the hash table, in which case we have no
	       hope of synthesizing a multiplication.  Just
	       return.  */
	    return;

	  /* If we get here, COST_LIMIT is less restrictive than the
	     one recorded in the hash table, so we may be able to
	     synthesize a multiplication.  Proceed as if we didn't
	     have the cache entry.  */
	}
      else
	{
	  if (CHEAPER_MULT_COST (cost_limit, &entry_ptr->cost))
	    /* The cached algorithm shows that this multiplication
	       requires more cost than COST_LIMIT.  Just return.  This
	       way, we don't clobber this cache entry with
	       alg_impossible but retain useful information.  */
	    return;

	  cache_hit = true;

	  switch (cache_alg)
	    {
	    case alg_shift:
	      goto do_alg_shift;

	    case alg_add_t_m2:
	    case alg_sub_t_m2:
	      goto do_alg_addsub_t_m2;

	    case alg_add_factor:
	    case alg_sub_factor:
	      goto do_alg_addsub_factor;

	    case alg_add_t2_m:
	      goto do_alg_add_t2_m;

	    case alg_sub_t2_m:
	      goto do_alg_sub_t2_m;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */

  if ((t & 1) == 0)
    {
    do_alg_shift:
      m = floor_log2 (t & -t);	/* m = number of low zero bits */
      if (m < maxm)
	{
	  q = t >> m;
	  /* The function expand_shift will choose between a shift and
	     a sequence of additions, so the observed cost is given as
	     MIN (m * add_cost(speed, mode), shift_cost(speed, mode, m)).  */
	  op_cost = m * add_cost (speed, mode);
	  if (shift_cost (speed, mode, m) < op_cost)
	    op_cost = shift_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, q, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_shift;
	    }

	  /* See if treating ORIG_T as a signed number yields a better
	     sequence.  Try this sequence only for a negative ORIG_T
	     as it would be useless for a non-negative ORIG_T.  */
	  if ((HOST_WIDE_INT) orig_t < 0)
	    {
	      /* Shift ORIG_T as follows because a right shift of a
		 negative-valued signed type is implementation
		 defined.  */
	      q = ~(~orig_t >> m);
	      /* The function expand_shift will choose between a shift
		 and a sequence of additions, so the observed cost is
		 given as MIN (m * add_cost(speed, mode),
		 shift_cost(speed, mode, m)).  */
	      op_cost = m * add_cost (speed, mode);
	      if (shift_cost (speed, mode, m) < op_cost)
		op_cost = shift_cost (speed, mode, m);
	      new_limit.cost = best_cost.cost - op_cost;
	      new_limit.latency = best_cost.latency - op_cost;
	      synth_mult (alg_in, q, &new_limit, mode);

	      alg_in->cost.cost += op_cost;
	      alg_in->cost.latency += op_cost;
	      if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
		{
		  struct algorithm *x;
		  best_cost = alg_in->cost;
		  x = alg_in, alg_in = best_alg, best_alg = x;
		  best_alg->log[best_alg->ops] = m;
		  best_alg->op[best_alg->ops] = alg_shift;
		}
	    }
	}
      if (cache_hit)
	goto done;
    }

  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

    do_alg_addsub_t_m2:
      for (w = 1; (w & t) != 0; w <<= 1)
	;
      /* If T was -1, then W will be zero after the loop.  This is another
	 case where T ends with ...111.  Handling this with (T + 1) and
	 subtract 1 produces slightly better code and results in algorithm
	 selection much faster than treating it like the ...0111 case
	 below.  */
      if (w == 0
	  || (w > 2
	      /* Reject the case where t is 3.
		 Thus we prefer addition in that case.  */
	      && t != 3))
	{
	  /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */

	  op_cost = add_cost (speed, mode);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t + 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	    }
	}
      else
	{
	  /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */

	  op_cost = add_cost (speed, mode);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t - 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_add_t_m2;
	    }
	}

      /* We may be able to calculate a * -7, a * -15, a * -31, etc
	 quickly with a - a * n for some appropriate constant n.  */
      m = exact_log2 (-orig_t + 1);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftsub1_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
		      &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	    }
	}

      if (cache_hit)
	goto done;
    }

  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shift the result by m and add/subtract it to itself.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */

 do_alg_addsub_factor:
  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_add_factor))
	{
	  /* If the target has a cheap shift-and-add instruction use
	     that in preference to a shift insn followed by an add insn.
	     Assume that the shift-and-add is "atomic" with a latency
	     equal to its cost, otherwise assume that on superscalar
	     hardware the shift may be executed concurrently with the
	     earlier steps in the algorithm.  */
	  op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
	  if (shiftadd_cost (speed, mode, m) < op_cost)
	    {
	      op_cost = shiftadd_cost (speed, mode, m);
	      op_latency = op_cost;
	    }
	  else
	    op_latency = add_cost (speed, mode);

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_factor;
	    }
	  /* Other factors will have been taken care of in the recursion.  */
	  break;
	}

      d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_sub_factor))
	{
	  /* If the target has a cheap shift-and-subtract insn use
	     that in preference to a shift insn followed by a sub insn.
	     Assume that the shift-and-sub is "atomic" with a latency
	     equal to its cost, otherwise assume that on superscalar
	     hardware the shift may be executed concurrently with the
	     earlier steps in the algorithm.  */
	  op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
	  if (shiftsub0_cost (speed, mode, m) < op_cost)
	    {
	      op_cost = shiftsub0_cost (speed, mode, m);
	      op_latency = op_cost;
	    }
	  else
	    op_latency = add_cost (speed, mode);

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_factor;
	    }
	  break;
	}
    }
  if (cache_hit)
    goto done;

  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
    do_alg_add_t2_m:
      q = t - 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftadd_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_t2_m;
	    }
	}
      if (cache_hit)
	goto done;

    do_alg_sub_t2_m:
      q = t + 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftsub0_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t2_m;
	    }
	}
      if (cache_hit)
	goto done;
    }

 done:
  /* If best_cost has not decreased, we have not found any algorithm.  */
  if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
    {
      /* We failed to find an algorithm.  Record alg_impossible for
	 this case (that is, <T, MODE, COST_LIMIT>) so that next time
	 we are asked to find an algorithm for T within the same or
	 lower COST_LIMIT, we can immediately return to the
	 caller.  */
      entry_ptr->t = t;
      entry_ptr->mode = mode;
      entry_ptr->speed = speed;
      entry_ptr->alg = alg_impossible;
      entry_ptr->cost = *cost_limit;
      return;
    }

  /* Cache the result.  */
  if (!cache_hit)
    {
      entry_ptr->t = t;
      entry_ptr->mode = mode;
      entry_ptr->speed = speed;
      entry_ptr->alg = best_alg->op[best_alg->ops];
      entry_ptr->cost.cost = best_cost.cost;
      entry_ptr->cost.latency = best_cost.latency;
    }

  /* If we are getting a too long sequence for `struct algorithm'
     to record, make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = best_cost;
  memcpy (alg_out->op, best_alg->op,
	  alg_out->ops * sizeof *alg_out->op);
  memcpy (alg_out->log, best_alg->log,
	  alg_out->ops * sizeof *alg_out->log);
}
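
/* Editor's illustration (not GCC code; kept under "#if 0"): one
   concrete sequence synth_mult can find.  For t == 10 the recursion
   strips the low zero bit (alg_shift) and then sees 5 == 4 + 1
   (alg_add_t2_m), giving two cheap operations instead of a multiply.  */
#if 0
static int
times10 (int x)
{
  int t = (x << 2) + x;		/* x * 5:  alg_add_t2_m, log == 2 */
  return t << 1;		/* x * 10: alg_shift,     log == 1 */
}
#endif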
/* Find the cheapest way of multiplying a value of mode MODE by VAL.
   Try three variations:

       - a shift/add sequence based on VAL itself
       - a shift/add sequence based on -VAL, followed by a negation
       - a shift/add sequence based on VAL - 1, followed by an addition.

   Return true if the cheapest of these costs less than MULT_COST,
   describing the algorithm in *ALG and final fixup in *VARIANT.  */

static bool
choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
		     struct algorithm *alg, enum mult_variant *variant,
		     int mult_cost)
{
  struct algorithm alg2;
  struct mult_cost limit;
  int op_cost;
  bool speed = optimize_insn_for_speed_p ();

  /* Fail quickly for impossible bounds.  */
  if (mult_cost < 0)
    return false;

  /* Ensure that mult_cost provides a reasonable upper bound.
     Any constant multiplication can be performed with less
     than 2 * bits additions.  */
  op_cost = 2 * GET_MODE_UNIT_BITSIZE (mode) * add_cost (speed, mode);
  if (mult_cost > op_cost)
    mult_cost = op_cost;

  *variant = basic_variant;
  limit.cost = mult_cost;
  limit.latency = mult_cost;
  synth_mult (alg, val, &limit, mode);

  /* This works only if the inverted value actually fits in an
     `unsigned int' */
  if (HOST_BITS_PER_INT >= GET_MODE_UNIT_BITSIZE (mode))
    {
      op_cost = neg_cost (speed, mode);
      if (MULT_COST_LESS (&alg->cost, mult_cost))
	{
	  limit.cost = alg->cost.cost - op_cost;
	  limit.latency = alg->cost.latency - op_cost;
	}
      else
	{
	  limit.cost = mult_cost - op_cost;
	  limit.latency = mult_cost - op_cost;
	}

      synth_mult (&alg2, -val, &limit, mode);
      alg2.cost.cost += op_cost;
      alg2.cost.latency += op_cost;
      if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
	*alg = alg2, *variant = negate_variant;
    }

  /* This proves very useful for division-by-constant.  */
  op_cost = add_cost (speed, mode);
  if (MULT_COST_LESS (&alg->cost, mult_cost))
    {
      limit.cost = alg->cost.cost - op_cost;
      limit.latency = alg->cost.latency - op_cost;
    }
  else
    {
      limit.cost = mult_cost - op_cost;
      limit.latency = mult_cost - op_cost;
    }

  synth_mult (&alg2, val - 1, &limit, mode);
  alg2.cost.cost += op_cost;
  alg2.cost.latency += op_cost;
  if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
    *alg = alg2, *variant = add_variant;

  return MULT_COST_LESS (&alg->cost, mult_cost);
}
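
/* Editor's illustration (not GCC code; kept under "#if 0"): what
   negate_variant buys.  synth_mult works on the constant as an
   unsigned number, so for val == -5 it is cheaper to synthesize +5
   and apply the negation fixup afterwards.  */
#if 0
static int
times_minus_5 (int x)
{
  int t = (x << 2) + x;		/* shift/add sequence for +5 */
  return -t;			/* negate_variant fixup */
}
#endif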
/* A subroutine of expand_mult, used for constant multiplications.
   Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
   convenient.  Use the shift/add sequence described by ALG and apply
   the final fixup specified by VARIANT.  */

static rtx
expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
		   rtx target, const struct algorithm *alg,
		   enum mult_variant variant)
{
  HOST_WIDE_INT val_so_far;
  rtx insn, accum, tem;
  int opno;
  enum machine_mode nmode;

  /* Avoid referencing memory over and over and invalid sharing
     on SUBREGs.  */
  op0 = force_reg (mode, op0);

  /* ACCUM starts out either as OP0 or as a zero, depending on
     the first operation.  */

  if (alg->op[0] == alg_zero)
    {
      accum = copy_to_mode_reg (mode, CONST0_RTX (mode));
      val_so_far = 0;
    }
  else if (alg->op[0] == alg_m)
    {
      accum = copy_to_mode_reg (mode, op0);
      val_so_far = 1;
    }
  else
    gcc_unreachable ();

  for (opno = 1; opno < alg->ops; opno++)
    {
      int log = alg->log[opno];
      rtx shift_subtarget = optimize ? 0 : accum;
      rtx add_target
	= (opno == alg->ops - 1 && target != 0 && variant != add_variant
	   && !optimize)
	  ? target : 0;
      rtx accum_target = optimize ? 0 : accum;
      rtx accum_inner;

      switch (alg->op[opno])
	{
	case alg_shift:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
	  /* REG_EQUAL note will be attached to the following insn.  */
	  emit_move_insn (accum, tem);
	  val_so_far <<= log;
	  break;

	case alg_add_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_sub_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far -= (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_add_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				log, shift_subtarget, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) + 1;
	  break;

	case alg_sub_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				log, shift_subtarget, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) - 1;
	  break;

	case alg_add_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += val_so_far << log;
	  break;

	case alg_sub_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
				 (add_target
				  ? add_target : (optimize ? 0 : tem)));
	  val_so_far = (val_so_far << log) - val_so_far;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (SCALAR_INT_MODE_P (mode))
	{
	  /* Write a REG_EQUAL note on the last insn so that we can cse
	     multiplication sequences.  Note that if ACCUM is a SUBREG,
	     we've set the inner register and must properly indicate that.  */
	  tem = op0, nmode = mode;
	  accum_inner = accum;
	  if (GET_CODE (accum) == SUBREG)
	    {
	      accum_inner = SUBREG_REG (accum);
	      nmode = GET_MODE (accum_inner);
	      tem = gen_lowpart (nmode, op0);
	    }

	  insn = get_last_insn ();
	  set_dst_reg_note (insn, REG_EQUAL,
			    gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)),
			    accum_inner);
	}
    }

  if (variant == negate_variant)
    {
      val_so_far = -val_so_far;
      accum = expand_unop (mode, neg_optab, accum, target, 0);
    }
  else if (variant == add_variant)
    {
      val_so_far = val_so_far + 1;
      accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
    }

  /* Compare only the bits of val and val_so_far that are significant
     in the result mode, to avoid sign-/zero-extension confusion.  */
  nmode = GET_MODE_INNER (mode);
  if (nmode == VOIDmode)
    nmode = mode;
  val &= GET_MODE_MASK (nmode);
  val_so_far &= GET_MODE_MASK (nmode);
  gcc_assert (val == val_so_far);

  return accum;
}
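
/* Editor's illustration (not GCC code; kept under "#if 0"): the
   val_so_far bookkeeping mirrored on the host for the sequence
   {alg_m, alg_shift log 3, alg_sub_t_m2 log 0} multiplying by 7.
   The trailing check plays the role of the gcc_assert above.  */
#if 0
static int
times7 (int x)
{
  int accum = x;	/* alg_m:        val_so_far = 1 */
  accum <<= 3;		/* alg_shift:    val_so_far = 8 */
  accum -= x;		/* alg_sub_t_m2: val_so_far = 7 */
  /* assert (val_so_far == 7);  */
  return accum;
}
#endif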
/* Perform a multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).

   We check specially for a constant integer as OP1.
   If you want this check for OP0 as well, then before calling
   you should swap the two operands if OP0 would be constant.  */

rtx
expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
	     int unsignedp)
{
  enum mult_variant variant;
  struct algorithm algorithm;
  rtx scalar_op1;
  int max_cost;
  bool speed = optimize_insn_for_speed_p ();
  bool do_trapv = flag_trapv && SCALAR_INT_MODE_P (mode) && !unsignedp;

  if (CONSTANT_P (op0))
    {
      rtx temp = op0;
      op0 = op1;
      op1 = temp;
    }

  /* For vectors, there are several simplifications that can be made if
     all elements of the vector constant are identical.  */
  scalar_op1 = op1;
  if (GET_CODE (op1) == CONST_VECTOR)
    {
      int i, n = CONST_VECTOR_NUNITS (op1);
      scalar_op1 = CONST_VECTOR_ELT (op1, 0);
      for (i = 1; i < n; ++i)
	if (!rtx_equal_p (scalar_op1, CONST_VECTOR_ELT (op1, i)))
	  goto skip_scalar;
    }

  if (INTEGRAL_MODE_P (mode))
    {
      rtx fake_reg;
      HOST_WIDE_INT coeff;
      bool is_neg;
      int mode_bitsize;

      if (op1 == CONST0_RTX (mode))
	return op1;
      if (op1 == CONST1_RTX (mode))
	return op0;
      if (op1 == CONSTM1_RTX (mode))
	return expand_unop (mode, do_trapv ? negv_optab : neg_optab,
			    op0, target, 0);

      if (do_trapv)
	goto skip_synth;

      /* These are the operations that are potentially turned into
	 a sequence of shifts and additions.  */
      mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);

      /* synth_mult does an `unsigned int' multiply.  As long as the mode is
	 less than or equal in size to `unsigned int' this doesn't matter.
	 If the mode is larger than `unsigned int', then synth_mult works
	 only if the constant value exactly fits in an `unsigned int' without
	 any truncation.  This means that multiplying by negative values does
	 not work; results are off by 2^32 on a 32 bit machine.  */

      if (CONST_INT_P (scalar_op1))
	{
	  coeff = INTVAL (scalar_op1);
	  is_neg = coeff < 0;
	}
      else if (CONST_DOUBLE_AS_INT_P (scalar_op1))
	{
	  /* If we are multiplying in DImode, it may still be a win
	     to try to work with shifts and adds.  */
	  if (CONST_DOUBLE_HIGH (scalar_op1) == 0
	      && CONST_DOUBLE_LOW (scalar_op1) > 0)
	    {
	      coeff = CONST_DOUBLE_LOW (scalar_op1);
	      is_neg = false;
	    }
	  else if (CONST_DOUBLE_LOW (scalar_op1) == 0)
	    {
	      coeff = CONST_DOUBLE_HIGH (scalar_op1);
	      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
		{
		  int shift = floor_log2 (coeff) + HOST_BITS_PER_WIDE_INT;
		  if (shift < HOST_BITS_PER_DOUBLE_INT - 1
		      || mode_bitsize <= HOST_BITS_PER_DOUBLE_INT)
		    return expand_shift (LSHIFT_EXPR, mode, op0,
					 shift, target, unsignedp);
		}
	      goto skip_synth;
	    }
	  else
	    goto skip_synth;
	}
      else
	goto skip_synth;

      /* We used to test optimize here, on the grounds that it's better to
	 produce a smaller program when -O is not used.  But this causes
	 such a terrible slowdown sometimes that it seems better to always
	 use synth_mult.  */

      /* Special case powers of two.  */
      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
	return expand_shift (LSHIFT_EXPR, mode, op0,
			     floor_log2 (coeff), target, unsignedp);

      fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);

      /* Attempt to handle multiplication of DImode values by negative
	 coefficients, by performing the multiplication by a positive
	 multiplier and then inverting the result.  */
      if (is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT)
	{
	  /* It's safe to use -coeff even for INT_MIN, as the
	     result is interpreted as an unsigned coefficient.
	     Exclude cost of op0 from max_cost to match the cost
	     calculation of the synth_mult.  */
	  max_cost = (set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), speed)
		      - neg_cost (speed, mode));
	  if (max_cost > 0
	      && choose_mult_variant (mode, -coeff, &algorithm,
				      &variant, max_cost))
	    {
	      rtx temp = expand_mult_const (mode, op0, -coeff, NULL_RTX,
					    &algorithm, variant);
	      return expand_unop (mode, neg_optab, temp, target, 0);
	    }
	  goto skip_synth;
	}

      /* Exclude cost of op0 from max_cost to match the cost
	 calculation of the synth_mult.  */
      max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), speed);
      if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
	return expand_mult_const (mode, op0, coeff, target,
				  &algorithm, variant);
    skip_synth:
      ;
    }

  /* Expand x*2.0 as x+x.  */
  if (CONST_DOUBLE_AS_FLOAT_P (scalar_op1))
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, scalar_op1);

      if (REAL_VALUES_EQUAL (d, dconst2))
	{
	  op0 = force_reg (GET_MODE (op0), op0);
	  return expand_binop (mode, add_optab, op0, op0,
			       target, unsignedp, OPTAB_LIB_WIDEN);
	}
    }
 skip_scalar:

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab,
		      op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
  gcc_assert (op0);
  return op0;
}
/* Return a cost estimate for multiplying a register by the given
   COEFFicient in the given MODE and SPEED.  */

int
mult_by_coeff_cost (HOST_WIDE_INT coeff, enum machine_mode mode, bool speed)
{
  int max_cost;
  struct algorithm algorithm;
  enum mult_variant variant;

  rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
  max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, fake_reg), speed);
  if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
    return algorithm.cost.cost;
  else
    return max_cost;
}
/* Perform a widening multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).
   THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
   or smul_widen_optab.

   We check specially for a constant integer as OP1, comparing the
   cost of a widening multiply against the cost of a sequence of shifts
   and adds.  */

rtx
expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
		      int unsignedp, optab this_optab)
{
  bool speed = optimize_insn_for_speed_p ();
  rtx cop1;

  if (CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
				this_optab == umul_widen_optab))
      && CONST_INT_P (cop1)
      && (INTVAL (cop1) >= 0
	  || HWI_COMPUTABLE_MODE_P (mode)))
    {
      HOST_WIDE_INT coeff = INTVAL (cop1);
      int max_cost;
      enum mult_variant variant;
      struct algorithm algorithm;

      /* Special case powers of two.  */
      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
	{
	  op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
	  return expand_shift (LSHIFT_EXPR, mode, op0,
			       floor_log2 (coeff), target, unsignedp);
	}

      /* Exclude cost of op0 from max_cost to match the cost
	 calculation of the synth_mult.  */
      max_cost = mul_widen_cost (speed, mode);
      if (choose_mult_variant (mode, coeff, &algorithm, &variant,
			       max_cost))
	{
	  op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
	  return expand_mult_const (mode, op0, coeff, target,
				    &algorithm, variant);
	}
    }
  return expand_binop (mode, this_optab, op0, op1, target,
		       unsignedp, OPTAB_LIB_WIDEN);
}
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose
   multiplier more freely.

   The rounded-up logarithm of D is placed in *lgup_ptr.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */

unsigned HOST_WIDE_INT
choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
		   unsigned HOST_WIDE_INT *multiplier_ptr,
		   int *post_shift_ptr, int *lgup_ptr)
{
  double_int mhigh, mlow;
  int lgup, post_shift;
  int pow, pow2;

  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  gcc_assert (lgup <= n);

  pow = n + lgup;
  pow2 = n + lgup - precision;

  /* We could handle this with some effort, but this case is much
     better handled directly with a scc insn, so rely on caller using
     that.  */
  gcc_assert (pow != HOST_BITS_PER_DOUBLE_INT);

  /* mlow = 2^(N + lgup)/d */
  double_int val = double_int_zero.set_bit (pow);
  mlow = val.div (double_int::from_uhwi (d), true, TRUNC_DIV_EXPR);

  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
  val |= double_int_zero.set_bit (pow2);
  mhigh = val.div (double_int::from_uhwi (d), true, TRUNC_DIV_EXPR);

  gcc_assert (!mhigh.high || val.high - d < d);
  gcc_assert (mhigh.high <= 1 && mlow.high <= 1);
  /* Assert that mlow < mhigh.  */
  gcc_assert (mlow.ult (mhigh));

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms.  */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      int shft = HOST_BITS_PER_WIDE_INT - 1;
      unsigned HOST_WIDE_INT ml_lo = (mlow.high << shft) | (mlow.low >> 1);
      unsigned HOST_WIDE_INT mh_lo = (mhigh.high << shft) | (mhigh.low >> 1);
      if (ml_lo >= mh_lo)
	break;

      mlow = double_int::from_uhwi (ml_lo);
      mhigh = double_int::from_uhwi (mh_lo);
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
      *multiplier_ptr = mhigh.low & mask;
      return mhigh.low >= mask;
    }
  else
    {
      *multiplier_ptr = mhigh.low;
      return mhigh.high;
    }
}
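
/* Editor's illustration (not GCC code; kept under "#if 0"): the kind
   of replacement choose_multiplier enables.  For d == 3 with
   n == precision == 32 it finds multiplier 0xAAAAAAAB with
   post_shift 1, so an unsigned division by 3 becomes a high multiply
   followed by a shift.  */
#if 0
static unsigned int
udiv3 (unsigned int x)
{
  /* (x * 0xAAAAAAAB) >> 32 >> 1, folded into a single shift.  */
  return (unsigned int) (((unsigned long long) x * 0xAAAAAAABu) >> 33);
}
#endif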
/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */

static unsigned HOST_WIDE_INT
invert_mod2n (unsigned HOST_WIDE_INT x, int n)
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;
  int nbit = 3;

  mask = (n == HOST_BITS_PER_WIDE_INT
	  ? ~(unsigned HOST_WIDE_INT) 0
	  : ((unsigned HOST_WIDE_INT) 1 << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;		/* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
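
/* Editor's illustration (not GCC code; kept under "#if 0"): the same
   Newton-style iteration on a fixed 64-bit type.  Starting from
   y = x, which is correct modulo 2^3 for odd x, each step doubles the
   number of correct low bits (3, 6, 12, 24, 48, 96), so five steps
   suffice for 64 bits; the natural wraparound of the type supplies
   the mask.  */
#if 0
static unsigned long long
inverse_mod_2_64 (unsigned long long x)	/* x must be odd */
{
  unsigned long long y = x;
  int nbit;
  for (nbit = 3; nbit < 64; nbit *= 2)
    y = y * (2 - x * y);	/* doubles the correct low bits */
  return y;			/* x * y == 1 (mod 2^64) */
}
#endif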
/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
   flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.

   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */

rtx
expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
			     rtx op1, rtx target, int unsignedp)
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
		      GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
  tem = expand_and (mode, tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
		     adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
		      GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
  tem = expand_and (mode, tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
			  target);

  return target;
}
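
/* Editor's illustration (not GCC code; kept under "#if 0"): the
   identity behind the adjustment, written for 32-bit operands and
   assuming an arithmetic right shift, as the rtl above uses.  The
   unsigned and signed high halves differ by (op0 < 0 ? op1 : 0) +
   (op1 < 0 ? op0 : 0), which shift-and-AND produces branchlessly.  */
#if 0
static unsigned int
umulhi_from_smulhi (unsigned int shi, int op0, int op1)
{
  /* (op0 >> 31) is an all-ones mask exactly when op0 is negative.  */
  unsigned int t0 = (unsigned int) (op0 >> 31) & (unsigned int) op1;
  unsigned int t1 = (unsigned int) (op1 >> 31) & (unsigned int) op0;
  return shi + t0 + t1;		/* adj_code == PLUS for unsignedp */
}
#endif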
/* Subroutine of expmed_mult_highpart.  Return the MODE high part of OP.  */

static rtx
extract_high_half (enum machine_mode mode, rtx op)
{
  enum machine_mode wider_mode;

  if (mode == word_mode)
    return gen_highpart (mode, op);

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  op = expand_shift (RSHIFT_EXPR, wider_mode, op,
		     GET_MODE_BITSIZE (mode), 0, 1);
  return convert_modes (mode, wider_mode, op, 0);
}
/* Like expmed_mult_highpart, but only consider using a multiplication
   optab.  OP1 is an rtx for the constant operand.  */

static rtx
expmed_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
			    rtx target, int unsignedp, int max_cost)
{
  rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
  enum machine_mode wider_mode;
  optab moptab;
  rtx tem;
  int size;
  bool speed = optimize_insn_for_speed_p ();

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  size = GET_MODE_BITSIZE (mode);

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost (speed, mode) < max_cost)
    {
      moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	return tem;
    }

  /* Secondly, same as above, but use sign flavor opposite of unsignedp.
     Need to adjust the result after the multiplication.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost (speed, mode)
	  + 2 * shift_cost (speed, mode, size-1)
	  + 4 * add_cost (speed, mode) < max_cost))
    {
      moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	/* We used the wrong signedness.  Adjust the result.  */
	return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					    tem, unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
      && mul_widen_cost (speed, wider_mode) < max_cost)
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
			  unsignedp, OPTAB_WIDEN);
      if (tem)
	return extract_high_half (mode, tem);
    }

  /* Try widening the mode and perform a non-widening multiplication.  */
  if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_cost (speed, wider_mode) + shift_cost (speed, mode, size-1)
	  < max_cost))
    {
      rtx insns, wop0, wop1;

      /* We need to widen the operands, for example to ensure the
	 constant multiplier is correctly sign or zero extended.
	 Use a sequence to clean-up any instructions emitted by
	 the conversions if things don't work out.  */
      start_sequence ();
      wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
      wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
      tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
			  unsignedp, OPTAB_WIDEN);
      insns = get_insns ();
      end_sequence ();

      if (tem)
	{
	  emit_insn (insns);
	  return extract_high_half (mode, tem);
	}
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost (speed, wider_mode)
	  + 2 * shift_cost (speed, mode, size-1)
	  + 4 * add_cost (speed, mode) < max_cost))
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
			  NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
	{
	  tem = extract_high_half (mode, tem);
	  /* We used the wrong signedness.  Adjust the result.  */
	  return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					      target, unsignedp);
	}
    }

  return 0;
}
/* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
   putting the high half of the result in TARGET if that is convenient,
   and return where the result is.  If the operation can not be performed,
   0 is returned.

   MODE is the mode of operation and result.

   UNSIGNEDP nonzero means unsigned multiply.

   MAX_COST is the total allowed cost for the expanded RTL.  */

static rtx
expmed_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
		      rtx target, int unsignedp, int max_cost)
{
  enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
  unsigned HOST_WIDE_INT cnst1;
  int extra_cost;
  bool sign_adjust = false;
  enum mult_variant variant;
  struct algorithm alg;
  rtx tem;
  bool speed = optimize_insn_for_speed_p ();

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
  /* We can't support modes wider than HOST_BITS_PER_INT.  */
  gcc_assert (HWI_COMPUTABLE_MODE_P (mode));

  cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);

  /* We can't optimize modes wider than BITS_PER_WORD.
     ??? We might be able to perform double-word arithmetic if
     mode == word_mode, however all the cost calculations in
     synth_mult etc. assume single-word operations.  */
  if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
    return expmed_mult_highpart_optab (mode, op0, op1, target,
				       unsignedp, max_cost);

  extra_cost = shift_cost (speed, mode, GET_MODE_BITSIZE (mode) - 1);

  /* Check whether we try to multiply by a negative constant.  */
  if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
    {
      sign_adjust = true;
      extra_cost += add_cost (speed, mode);
    }

  /* See whether shift/add multiplication is cheap enough.  */
  if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
			   max_cost - extra_cost))
    {
      /* See whether the specialized multiplication optabs are
	 cheaper than the shift/add version.  */
      tem = expmed_mult_highpart_optab (mode, op0, op1, target, unsignedp,
					alg.cost.cost + extra_cost);
      if (tem)
	return tem;

      tem = convert_to_mode (wider_mode, op0, unsignedp);
      tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
      tem = extract_high_half (mode, tem);

      /* Adjust result for signedness.  */
      if (sign_adjust)
	tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);

      return tem;
    }
  return expmed_mult_highpart_optab (mode, op0, op1, target,
				     unsignedp, max_cost);
}
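
/* Editor's illustration (not GCC code; kept under "#if 0"): what the
   expanded rtl computes, for a 32-bit mode on the host.  The high
   half of the widening product is the goal; everything above is only
   choosing the cheapest way to obtain it.  */
#if 0
static int
smulhi32 (int a, int b)
{
  return (int) (((long long) a * b) >> 32);
}
#endif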
/* Expand signed modulus of OP0 by a power of two D in mode MODE.  */

static rtx
expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  unsigned HOST_WIDE_INT masklow, maskhigh;
  rtx result, temp, shift, label;
  int logd;

  logd = floor_log2 (d);
  result = gen_reg_rtx (mode);

  /* Avoid conditional branches when they're expensive.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
      && optimize_insn_for_speed_p ())
    {
      rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
				      mode, 0, -1);
      if (signmask)
	{
	  signmask = force_reg (mode, signmask);
	  masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
	  shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);

	  /* Use the rtx_cost of a LSHIFTRT instruction to determine
	     which instruction sequence to use.  If logical right shifts
	     are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
	     use a LSHIFTRT, 1 ADD, 1 SUB and an AND.  */

	  temp = gen_rtx_LSHIFTRT (mode, result, shift);
	  if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
	      || (set_src_cost (temp, optimize_insn_for_speed_p ())
		  > COSTS_N_INSNS (2)))
	    {
	      temp = expand_binop (mode, xor_optab, op0, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, xor_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	    }
	  else
	    {
	      signmask = expand_binop (mode, lshr_optab, signmask, shift,
				       NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      signmask = force_reg (mode, signmask);

	      temp = expand_binop (mode, add_optab, op0, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	    }
	  return temp;
	}
    }

  /* Mask contains the mode's signbit and the significant bits of the
     modulus.  By including the signbit in the operation, many targets
     can avoid an explicit compare operation in the following comparison
     against zero.  */

  masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
  if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
      maskhigh = -1;
    }
  else
    maskhigh = (HOST_WIDE_INT) -1
		 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);

  temp = expand_binop (mode, and_optab, op0,
		       immed_double_const (masklow, maskhigh, mode),
		       result, 1, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  label = gen_label_rtx ();
  do_cmp_and_jump (result, const0_rtx, GE, mode, label);

  temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
		       0, OPTAB_LIB_WIDEN);
  masklow = (HOST_WIDE_INT) -1 << logd;
  maskhigh = -1;
  temp = expand_binop (mode, ior_optab, temp,
		       immed_double_const (masklow, maskhigh, mode),
		       result, 1, OPTAB_LIB_WIDEN);
  temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
		       0, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  emit_label (label);
  return result;
}
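
/* Editor's illustration (not GCC code; kept under "#if 0"): the
   cheap-LSHIFTRT branch above, for x % 8 on a 32-bit int, assuming an
   arithmetic right shift.  SIGNMASK ends up as d - 1 for negative x
   and 0 otherwise, so one add, one and and one subtract yield the
   truncating C remainder without a branch.  */
#if 0
static int
smod8 (int x)
{
  unsigned int signmask = (unsigned int) (x >> 31) >> (32 - 3);
  return (int) ((((unsigned int) x + signmask) & 7u) - signmask);
}
#endif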
/* Expand signed division of OP0 by a power of two D in mode MODE.
   This routine is only called for positive values of D.  */

static rtx
expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx temp, label;
  int logd;

  logd = floor_log2 (d);

  if (d == 2
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 1)
    {
      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
    }

#ifdef HAVE_conditional_move
  if (BRANCH_COST (optimize_insn_for_speed_p (), false)
      >= 2)
    {
      rtx temp2;

      /* ??? emit_conditional_move forces a stack adjustment via
         compare_from_rtx, so if the sequence is discarded, it will
         be lost.  Do it now instead.  */
      do_pending_stack_adjust ();

      start_sequence ();
      temp2 = copy_to_mode_reg (mode, op0);
      temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
                           NULL_RTX, 0, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);

      /* Construct "temp2 = (temp2 < 0) ? temp : temp2".  */
      temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
                                     mode, temp, temp2, mode, 0);
      if (temp2)
        {
          rtx seq = get_insns ();
          end_sequence ();
          emit_insn (seq);
          return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
        }
      end_sequence ();
    }
#endif

  if (BRANCH_COST (optimize_insn_for_speed_p (),
                   false) >= 2)
    {
      int ushift = GET_MODE_BITSIZE (mode) - logd;

      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
      if (shift_cost (optimize_insn_for_speed_p (), mode, ushift)
          > COSTS_N_INSNS (1))
        temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
                             NULL_RTX, 0, OPTAB_LIB_WIDEN);
      else
        temp = expand_shift (RSHIFT_EXPR, mode, temp,
                             ushift, NULL_RTX, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
    }

  label = gen_label_rtx ();
  temp = copy_to_mode_reg (mode, op0);
  do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
  expand_inc (temp, GEN_INT (d - 1));
  emit_label (label);
  return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
}
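
/* Illustration (not part of GCC): the branchless power-of-two division
   emitted above reduces to adding D-1 to negative operands before the
   arithmetic shift.  A minimal sketch assuming 32-bit int, an arithmetic
   right shift of negative values, and d = 2**logd; the helper name is
   illustrative only.  Guarded by #if 0 so it stays out of the build.  */
#if 0
#include <assert.h>
#include <stdint.h>

static int32_t
sdiv_pow2 (int32_t x, int logd)
{
  /* x >> 31 is 0 or -1; ANDed with d-1 it yields the bias 0 or d-1,
     the same value the emit_store_flag/and_optab path computes.  */
  int32_t bias = (x >> 31) & (((int32_t) 1 << logd) - 1);
  return (x + bias) >> logd;
}

int
main (void)
{
  int32_t x;
  for (x = -1000; x <= 1000; x++)
    assert (sdiv_pow2 (x, 4) == x / 16);        /* matches truncating "/" */
  return 0;
}
#endif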
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   left for later.  */

/* ??? For modulo, we don't actually need the highpart of the first product,
   the low part will do nicely.  And for small divisors, the second multiply
   can also be a low-part only multiply or even be completely left out.
   E.g. to calculate the remainder of a division by 3 with a 32 bit
   multiply, multiply with 0x55555556 and extract the upper two bits;
   the result is exact for inputs up to 0x1fffffff.
   The input range can be reduced by using cross-sum rules.
   For odd divisors >= 3, the following table gives right shift counts
   so that if a number is shifted by an integer multiple of the given
   amount, the remainder stays the same:
   2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
   14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
   0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
   20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
   0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12.

   Cross-sum rules for even numbers can be derived by leaving as many bits
   to the right alone as the divisor has zeros to the right.
   E.g. if x is an unsigned 32 bit number:
   (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
   */
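
/* Illustration (not part of GCC): the "multiply with 0x55555556" remark
   above, written out.  0x55555556 is ceil(2**32 / 3), so the low 32 bits
   of x * 0x55555556 approximate (x mod 3) * 2**32 / 3, and the top two
   bits of that low word are the remainder directly.  A sketch only, valid
   for x up to 0x1fffffff as the comment states; the helper name is
   illustrative.  #if 0 keeps it out of the build.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
rem3 (uint32_t x)
{
  /* Unsigned multiply wraps mod 2**32, keeping only the "fraction".  */
  return (uint32_t) (x * 0x55555556u) >> 30;
}

int
main (void)
{
  uint32_t x;
  for (x = 0; x <= 100000; x++)
    assert (rem3 (x) == x % 3);
  assert (rem3 (0x1fffffffu) == 0x1fffffffu % 3);
  return 0;
}
#endif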
rtx
expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
               rtx op0, rtx op1, rtx target, int unsignedp)
{
  enum machine_mode compute_mode;
  rtx tquotient;
  rtx quotient = 0, remainder = 0;
  rtx last;
  int size;
  rtx insn;
  optab optab1, optab2;
  int op1_is_constant, op1_is_pow2 = 0;
  int max_cost, extra_cost;
  static HOST_WIDE_INT last_div_const = 0;
  static HOST_WIDE_INT ext_op1;
  bool speed = optimize_insn_for_speed_p ();

  op1_is_constant = CONST_INT_P (op1);
  if (op1_is_constant)
    {
      ext_op1 = INTVAL (op1);
      if (unsignedp)
        ext_op1 &= GET_MODE_MASK (mode);
      op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
                     || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
    }

  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding mode.
     For some special operands this code emits all RTL for the desired
     operation, for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
     by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expmed_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */

  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;

  /* When dividing by -1, we could get an overflow.
     negv_optab can handle overflows.  */
  if (! unsignedp && op1 == constm1_rtx)
    {
      if (rem_flag)
        return const0_rtx;
      return expand_unop (mode, flag_trapv && GET_MODE_CLASS (mode) == MODE_INT
                          ? negv_optab : neg_optab, op0, target, 0);
    }

  if (target
      /* Don't use the function value register as a target
         since we have to read it as well as write it,
         and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
          /* Don't clobber an operand while doing a multi-step calculation.  */
          || ((rem_flag || op1_is_constant)
              && (reg_mentioned_p (target, op0)
                  || (MEM_P (op0) && MEM_P (target))))
          || reg_mentioned_p (target, op1)
          || (MEM_P (op1) && MEM_P (target))))
    target = 0;

  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions that modxx3 insns and that these insns don't normally
     fail.  If these assumptions are not correct, we may generate less
     efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expmed_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */

  optab1 = ((op1_is_pow2 && op1 != const0_rtx)
            ? (unsignedp ? lshr_optab : ashr_optab)
            : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = ((op1_is_pow2 && op1 != const0_rtx)
            ? optab1
            : (unsignedp ? udivmod_optab : sdivmod_optab));

  for (compute_mode = mode; compute_mode != VOIDmode;
       compute_mode = GET_MODE_WIDER_MODE (compute_mode))
    if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
        || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    for (compute_mode = mode; compute_mode != VOIDmode;
         compute_mode = GET_MODE_WIDER_MODE (compute_mode))
      if (optab_libfunc (optab1, compute_mode)
          || optab_libfunc (optab2, compute_mode))
        break;

  /* If we still couldn't find a mode, use MODE, but expand_binop will
     probably die.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;

  if (target && GET_MODE (target) == compute_mode)
    tquotient = target;
  else
    tquotient = gen_reg_rtx (compute_mode);

  size = GET_MODE_BITSIZE (compute_mode);
#if 0
  /* It should be possible to restrict the precision to GET_MODE_BITSIZE
     (mode), and thereby get better code when OP1 is a constant.  Do that
     later.  It will require going over all usages of SIZE below.  */
  size = GET_MODE_BITSIZE (mode);
#endif

  /* Only deduct something for a REM if the last divide done was
     for a different constant.  Then set the constant of the last
     divide.  */
  max_cost = (unsignedp
              ? udiv_cost (speed, compute_mode)
              : sdiv_cost (speed, compute_mode));
  if (rem_flag && ! (last_div_const != 0 && op1_is_constant
                     && INTVAL (op1) == last_div_const))
    max_cost -= (mul_cost (speed, compute_mode)
                 + add_cost (speed, compute_mode));

  last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;

  /* Now convert to the best mode to use.  */
  if (compute_mode != mode)
    {
      op0 = convert_modes (compute_mode, mode, op0, unsignedp);
      op1 = convert_modes (compute_mode, mode, op1, unsignedp);

      /* convert_modes may have placed op1 into a register, so we
         must recompute the following.  */
      op1_is_constant = CONST_INT_P (op1);
      op1_is_pow2 = (op1_is_constant
                     && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                          || (! unsignedp
                              && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
    }

  /* If one of the operands is a volatile MEM, copy it into a register.  */

  if (MEM_P (op0) && MEM_VOLATILE_P (op0))
    op0 = force_reg (compute_mode, op0);
  if (MEM_P (op1) && MEM_VOLATILE_P (op1))
    op1 = force_reg (compute_mode, op1);

  /* If we need the remainder or if OP1 is constant, we need to
     put OP0 in a register in case it has any queued subexpressions.  */
  if (rem_flag || op1_is_constant)
    op0 = force_reg (compute_mode, op0);

  last = get_last_insn ();

  /* Promote floor rounding to trunc rounding for unsigned operations.  */
  if (unsignedp)
    {
      if (code == FLOOR_DIV_EXPR)
        code = TRUNC_DIV_EXPR;
      if (code == FLOOR_MOD_EXPR)
        code = TRUNC_MOD_EXPR;
      if (code == EXACT_DIV_EXPR && op1_is_pow2)
        code = TRUNC_DIV_EXPR;
    }

  if (op1 != const0_rtx)
    switch (code)
      {
      case TRUNC_MOD_EXPR:
      case TRUNC_DIV_EXPR:
        if (op1_is_constant)
          {
            if (unsignedp)
              {
                unsigned HOST_WIDE_INT mh, ml;
                int pre_shift, post_shift;
                int dummy;
                unsigned HOST_WIDE_INT d = (INTVAL (op1)
                                            & GET_MODE_MASK (compute_mode));

                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        remainder
                          = expand_binop (compute_mode, and_optab, op0,
                                          GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
                                          remainder, 1,
                                          OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                             pre_shift, tquotient, 1);
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
                      {
                        /* Most significant bit of divisor is set; emit an scc
                           insn.  */
                        quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
                                                          compute_mode, 1, 1);
                      }
                    else
                      {
                        /* Find a suitable multiplier and right shift count
                           instead of multiplying with D.  */

                        mh = choose_multiplier (d, size, size,
                                                &ml, &post_shift, &dummy);

                        /* If the suggested multiplier is more than SIZE bits,
                           we can do better for even divisors, using an
                           initial right shift.  */
                        if (mh != 0 && (d & 1) == 0)
                          {
                            pre_shift = floor_log2 (d & -d);
                            mh = choose_multiplier (d >> pre_shift, size,
                                                    size - pre_shift,
                                                    &ml, &post_shift, &dummy);
                            gcc_assert (!mh);
                          }
                        else
                          pre_shift = 0;

                        if (mh != 0)
                          {
                            rtx t1, t2, t3, t4;

                            if (post_shift - 1 >= BITS_PER_WORD)
                              goto fail1;

                            extra_cost
                              = (shift_cost (speed, compute_mode, post_shift - 1)
                                 + shift_cost (speed, compute_mode, 1)
                                 + 2 * add_cost (speed, compute_mode));
                            t1 = expmed_mult_highpart (compute_mode, op0,
                                                       GEN_INT (ml),
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
                            if (t1 == 0)
                              goto fail1;
                            t2 = force_operand (gen_rtx_MINUS (compute_mode,
                                                               op0, t1),
                                                NULL_RTX);
                            t3 = expand_shift (RSHIFT_EXPR, compute_mode,
                                               t2, 1, NULL_RTX, 1);
                            t4 = force_operand (gen_rtx_PLUS (compute_mode,
                                                              t1, t3),
                                                NULL_RTX);
                            quotient = expand_shift
                              (RSHIFT_EXPR, compute_mode, t4,
                               post_shift - 1, tquotient, 1);
                          }
                        else
                          {
                            rtx t1, t2;

                            if (pre_shift >= BITS_PER_WORD
                                || post_shift >= BITS_PER_WORD)
                              goto fail1;

                            t1 = expand_shift
                              (RSHIFT_EXPR, compute_mode, op0,
                               pre_shift, NULL_RTX, 1);
                            extra_cost
                              = (shift_cost (speed, compute_mode, pre_shift)
                                 + shift_cost (speed, compute_mode, post_shift));
                            t2 = expmed_mult_highpart (compute_mode, t1,
                                                       GEN_INT (ml),
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
                            if (t2 == 0)
                              goto fail1;
                            quotient = expand_shift
                              (RSHIFT_EXPR, compute_mode, t2,
                               post_shift, tquotient, 1);
                          }
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last)
                  set_dst_reg_note (insn, REG_EQUAL,
                                    gen_rtx_UDIV (compute_mode, op0, op1),
                                    quotient);
              }
            else                /* TRUNC_DIV, signed */
              {
                unsigned HOST_WIDE_INT ml;
                int lgup, post_shift;
                rtx mlr;
                HOST_WIDE_INT d = INTVAL (op1);
                unsigned HOST_WIDE_INT abs_d;

                /* Since d might be INT_MIN, we have to cast to
                   unsigned HOST_WIDE_INT before negating to avoid
                   undefined signed overflow.  */
                abs_d = (d >= 0
                         ? (unsigned HOST_WIDE_INT) d
                         : - (unsigned HOST_WIDE_INT) d);

                /* n rem d = n rem -d */
                if (rem_flag && d < 0)
                  {
                    d = abs_d;
                    op1 = gen_int_mode (abs_d, compute_mode);
                  }

                if (d == 1)
                  quotient = op0;
                else if (d == -1)
                  quotient = expand_unop (compute_mode, neg_optab, op0,
                                          tquotient, 0);
                else if (HOST_BITS_PER_WIDE_INT >= size
                         && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
                  {
                    /* This case is not handled correctly below.  */
                    quotient = emit_store_flag (tquotient, EQ, op0, op1,
                                                compute_mode, 1, 1);
                    if (quotient == 0)
                      goto fail1;
                  }
                else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
                         && (rem_flag
                             ? smod_pow2_cheap (speed, compute_mode)
                             : sdiv_pow2_cheap (speed, compute_mode))
                         /* We assume that cheap metric is true if the
                            optab has an expander for this mode.  */
                         && ((optab_handler ((rem_flag ? smod_optab
                                              : sdiv_optab),
                                             compute_mode)
                              != CODE_FOR_nothing)
                             || (optab_handler (sdivmod_optab,
                                                compute_mode)
                                 != CODE_FOR_nothing)))
                  ;
                else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
                  {
                    if (rem_flag)
                      {
                        remainder = expand_smod_pow2 (compute_mode, op0, d);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }

                    if (sdiv_pow2_cheap (speed, compute_mode)
                        && ((optab_handler (sdiv_optab, compute_mode)
                             != CODE_FOR_nothing)
                            || (optab_handler (sdivmod_optab, compute_mode)
                                != CODE_FOR_nothing)))
                      quotient = expand_divmod (0, TRUNC_DIV_EXPR,
                                                compute_mode, op0,
                                                gen_int_mode (abs_d,
                                                              compute_mode),
                                                NULL_RTX, 0);
                    else
                      quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);

                    /* We have computed OP0 / abs(OP1).  If OP1 is negative,
                       negate the quotient.  */
                    if (d < 0)
                      {
                        insn = get_last_insn ();
                        if (insn != last
                            && abs_d < ((unsigned HOST_WIDE_INT) 1
                                        << (HOST_BITS_PER_WIDE_INT - 1)))
                          set_dst_reg_note (insn, REG_EQUAL,
                                            gen_rtx_DIV (compute_mode, op0,
                                                         gen_int_mode
                                                           (abs_d,
                                                            compute_mode)),
                                            quotient);

                        quotient = expand_unop (compute_mode, neg_optab,
                                                quotient, quotient, 0);
                      }
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    choose_multiplier (abs_d, size, size - 1,
                                       &ml, &post_shift, &lgup);
                    if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
                      {
                        rtx t1, t2, t3;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        extra_cost = (shift_cost (speed, compute_mode, post_shift)
                                      + shift_cost (speed, compute_mode, size - 1)
                                      + add_cost (speed, compute_mode));
                        t1 = expmed_mult_highpart (compute_mode, op0,
                                                   GEN_INT (ml), NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = expand_shift
                          (RSHIFT_EXPR, compute_mode, t1,
                           post_shift, NULL_RTX, 0);
                        t3 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           size - 1, NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t2),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t2, t3),
                                             tquotient);
                      }
                    else
                      {
                        rtx t1, t2, t3, t4;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
                        mlr = gen_int_mode (ml, compute_mode);
                        extra_cost = (shift_cost (speed, compute_mode, post_shift)
                                      + shift_cost (speed, compute_mode, size - 1)
                                      + 2 * add_cost (speed, compute_mode));
                        t1 = expmed_mult_highpart (compute_mode, op0, mlr,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, op0),
                                            NULL_RTX);
                        t3 = expand_shift
                          (RSHIFT_EXPR, compute_mode, t2,
                           post_shift, NULL_RTX, 0);
                        t4 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           size - 1, NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t4, t3),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t4),
                                             tquotient);
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last)
                  set_dst_reg_note (insn, REG_EQUAL,
                                    gen_rtx_DIV (compute_mode, op0, op1),
                                    quotient);
              }
            break;
          }
      fail1:
        delete_insns_since (last);
        break;
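
        /* Illustration (not part of GCC): the choose_multiplier path above,
           for the concrete case of unsigned 32-bit division by 7.  There the
           multiplier m = floor(2**35/7) + 1 needs 33 bits, so mh != 0 and
           the add/shift fix-up sequence is used; ml = m - 2**32 = 0x24924925
           and post_shift = 3.  A sketch mirroring the t1..t4 temporaries,
           with an illustrative helper name; #if 0 keeps it out of the
           build.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
udiv7 (uint32_t x)
{
  uint32_t t1 = (uint32_t) (((uint64_t) x * 0x24924925u) >> 32); /* high part */
  uint32_t t2 = x - t1;                 /* cannot wrap: t1 <= x */
  uint32_t t3 = t2 >> 1;
  uint32_t t4 = t1 + t3;
  return t4 >> 2;                       /* post_shift - 1 */
}

int
main (void)
{
  uint32_t x;
  for (x = 0; x < 1000000; x++)
    assert (udiv7 (x) == x / 7);
  assert (udiv7 (0xffffffffu) == 0xffffffffu / 7);
  return 0;
}
#endif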
      case FLOOR_DIV_EXPR:
      case FLOOR_MOD_EXPR:
      /* We will come here only for signed operations.  */
        if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
          {
            unsigned HOST_WIDE_INT mh, ml;
            int pre_shift, lgup, post_shift;
            HOST_WIDE_INT d = INTVAL (op1);

            if (d > 0)
              {
                /* We could just as easily deal with negative constants here,
                   but it does not seem worth the trouble for GCC 2.6.  */
                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        remainder = expand_binop (compute_mode, and_optab, op0,
                                                  GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
                                                  remainder, 0, OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift
                      (RSHIFT_EXPR, compute_mode, op0,
                       pre_shift, tquotient, 0);
                  }
                else
                  {
                    rtx t1, t2, t3, t4;

                    mh = choose_multiplier (d, size, size - 1,
                                            &ml, &post_shift, &lgup);
                    gcc_assert (!mh);

                    if (post_shift < BITS_PER_WORD
                        && size - 1 < BITS_PER_WORD)
                      {
                        t1 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           size - 1, NULL_RTX, 0);
                        t2 = expand_binop (compute_mode, xor_optab, op0, t1,
                                           NULL_RTX, 0, OPTAB_WIDEN);
                        extra_cost = (shift_cost (speed, compute_mode, post_shift)
                                      + shift_cost (speed, compute_mode, size - 1)
                                      + 2 * add_cost (speed, compute_mode));
                        t3 = expmed_mult_highpart (compute_mode, t2,
                                                   GEN_INT (ml), NULL_RTX, 1,
                                                   max_cost - extra_cost);
                        if (t3 != 0)
                          {
                            t4 = expand_shift
                              (RSHIFT_EXPR, compute_mode, t3,
                               post_shift, NULL_RTX, 1);
                            quotient = expand_binop (compute_mode, xor_optab,
                                                     t4, t1, tquotient, 0,
                                                     OPTAB_WIDEN);
                          }
                      }
                  }
              }
            else
              {
                rtx nsign, t1, t2, t3, t4;
                t1 = force_operand (gen_rtx_PLUS (compute_mode,
                                                  op0, constm1_rtx), NULL_RTX);
                t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
                                   0, OPTAB_WIDEN);
                nsign = expand_shift
                  (RSHIFT_EXPR, compute_mode, t2,
                   size - 1, NULL_RTX, 0);
                t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
                                    NULL_RTX);
                t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
                                    NULL_RTX, 0);
                if (t4)
                  {
                    rtx t5;
                    t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
                                      NULL_RTX, 0);
                    quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                            t4, t5),
                                              tquotient);
                  }
              }
          }

        if (quotient != 0)
          break;
        delete_insns_since (last);

        /* Try using an instruction that produces both the quotient and
           remainder, using truncation.  We can easily compensate the quotient
           or remainder to get floor rounding, once we have the remainder.
           Notice that we compute also the final remainder value here,
           and return the result right away.  */
        if (target == 0 || GET_MODE (target) != compute_mode)
          target = gen_reg_rtx (compute_mode);

        if (rem_flag)
          {
            remainder
              = REG_P (target) ? target : gen_reg_rtx (compute_mode);
            quotient = gen_reg_rtx (compute_mode);
          }
        else
          {
            quotient
              = REG_P (target) ? target : gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
          }

        if (expand_twoval_binop (sdivmod_optab, op0, op1,
                                 quotient, remainder, 0))
          {
            /* This could be computed with a branch-less sequence.
               Save that for later.  */
            rtx tem;
            rtx label = gen_label_rtx ();
            do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
            tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
            expand_dec (quotient, const1_rtx);
            expand_inc (remainder, op1);
            emit_label (label);
            return gen_lowpart (mode, rem_flag ? remainder : quotient);
          }

        /* No luck with division elimination or divmod.  Have to do it
           by conditionally adjusting op0 *and* the result.  */
        {
          rtx label1, label2, label3, label4, label5;
          rtx adjusted_op0;
          rtx tem;

          quotient = gen_reg_rtx (compute_mode);
          adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
          label1 = gen_label_rtx ();
          label2 = gen_label_rtx ();
          label3 = gen_label_rtx ();
          label4 = gen_label_rtx ();
          label5 = gen_label_rtx ();
          do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (gen_jump (label5));
          emit_barrier ();
          emit_label (label1);
          expand_inc (adjusted_op0, const1_rtx);
          emit_jump_insn (gen_jump (label4));
          emit_barrier ();
          emit_label (label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (gen_jump (label5));
          emit_barrier ();
          emit_label (label3);
          expand_dec (adjusted_op0, const1_rtx);
          emit_label (label4);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          expand_dec (quotient, const1_rtx);
          emit_label (label5);
        }
        break;
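
        /* Illustration (not part of GCC): the quotient/remainder fix-up the
           FLOOR_DIV branch emits after a truncating divmod.  When the
           remainder is nonzero and the operands' signs differ, the truncated
           quotient is one too large for floor rounding.  Minimal sketch with
           an illustrative helper name; #if 0 keeps it out of the build.  */
#if 0
#include <assert.h>

static int
floor_div (int x, int y)
{
  int q = x / y;                        /* truncating division */
  int r = x % y;
  if (r != 0 && (x ^ y) < 0)            /* signs differ: round down */
    q--;
  return q;
}

int
main (void)
{
  assert (floor_div (7, 2) == 3);
  assert (floor_div (-7, 2) == -4);
  assert (floor_div (7, -2) == -4);
  assert (floor_div (-7, -2) == 3);
  return 0;
}
#endif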
      case CEIL_DIV_EXPR:
      case CEIL_MOD_EXPR:
        if (unsignedp)
          {
            if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
              {
                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   floor_log2 (d), tquotient, 1);
                t2 = expand_binop (compute_mode, and_optab, op0,
                                   GEN_INT (d - 1),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (compute_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                      compute_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, t3),
                                            tquotient);
                break;
              }

            /* Try using an instruction that produces both the quotient and
               remainder, using truncation.  We can easily compensate the
               quotient or remainder to get ceiling rounding, once we have the
               remainder.  Notice that we compute also the final remainder
               value here, and return the result right away.  */
            if (target == 0 || GET_MODE (target) != compute_mode)
              target = gen_reg_rtx (compute_mode);

            if (rem_flag)
              {
                remainder = (REG_P (target)
                             ? target : gen_reg_rtx (compute_mode));
                quotient = gen_reg_rtx (compute_mode);
              }
            else
              {
                quotient = (REG_P (target)
                            ? target : gen_reg_rtx (compute_mode));
                remainder = gen_reg_rtx (compute_mode);
              }

            if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
                                     remainder, 1))
              {
                /* This could be computed with a branch-less sequence.
                   Save that for later.  */
                rtx label = gen_label_rtx ();
                do_cmp_and_jump (remainder, const0_rtx, EQ,
                                 compute_mode, label);
                expand_inc (quotient, const1_rtx);
                expand_dec (remainder, op1);
                emit_label (label);
                return gen_lowpart (mode, rem_flag ? remainder : quotient);
              }

            /* No luck with division elimination or divmod.  Have to do it
               by conditionally adjusting op0 *and* the result.  */
            {
              rtx label1, label2;
              rtx adjusted_op0, tem;

              quotient = gen_reg_rtx (compute_mode);
              adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
              label1 = gen_label_rtx ();
              label2 = gen_label_rtx ();
              do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
                               compute_mode, label1);
              emit_move_insn (quotient, const0_rtx);
              emit_jump_insn (gen_jump (label2));
              emit_barrier ();
              emit_label (label1);
              expand_dec (adjusted_op0, const1_rtx);
              tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
                                  quotient, 1, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              expand_inc (quotient, const1_rtx);
              emit_label (label2);
            }
          }
        else /* signed */
          {
            if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                && INTVAL (op1) >= 0)
              {
                /* This is extremely similar to the code for the unsigned case
                   above.  For 2.7 we should merge these variants, but for
                   2.6.1 I don't want to touch the code for unsigned since that
                   gets used in C.  The signed case will only be used by other
                   languages (Ada).  */

                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   floor_log2 (d), tquotient, 0);
                t2 = expand_binop (compute_mode, and_optab, op0,
                                   GEN_INT (d - 1),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (compute_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                      compute_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, t3),
                                            tquotient);
                break;
              }

            /* Try using an instruction that produces both the quotient and
               remainder, using truncation.  We can easily compensate the
               quotient or remainder to get ceiling rounding, once we have the
               remainder.  Notice that we compute also the final remainder
               value here, and return the result right away.  */
            if (target == 0 || GET_MODE (target) != compute_mode)
              target = gen_reg_rtx (compute_mode);

            if (rem_flag)
              {
                remainder = (REG_P (target)
                             ? target : gen_reg_rtx (compute_mode));
                quotient = gen_reg_rtx (compute_mode);
              }
            else
              {
                quotient = (REG_P (target)
                            ? target : gen_reg_rtx (compute_mode));
                remainder = gen_reg_rtx (compute_mode);
              }

            if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
                                     remainder, 0))
              {
                /* This could be computed with a branch-less sequence.
                   Save that for later.  */
                rtx tem;
                rtx label = gen_label_rtx ();
                do_cmp_and_jump (remainder, const0_rtx, EQ,
                                 compute_mode, label);
                tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                    NULL_RTX, 0, OPTAB_WIDEN);
                do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
                expand_inc (quotient, const1_rtx);
                expand_dec (remainder, op1);
                emit_label (label);
                return gen_lowpart (mode, rem_flag ? remainder : quotient);
              }

            /* No luck with division elimination or divmod.  Have to do it
               by conditionally adjusting op0 *and* the result.  */
            {
              rtx label1, label2, label3, label4, label5;
              rtx adjusted_op0;
              rtx tem;

              quotient = gen_reg_rtx (compute_mode);
              adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
              label1 = gen_label_rtx ();
              label2 = gen_label_rtx ();
              label3 = gen_label_rtx ();
              label4 = gen_label_rtx ();
              label5 = gen_label_rtx ();
              do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
                               compute_mode, label1);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label1);
              expand_dec (adjusted_op0, const1_rtx);
              emit_jump_insn (gen_jump (label4));
              emit_barrier ();
              emit_label (label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
                               compute_mode, label3);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label3);
              expand_inc (adjusted_op0, const1_rtx);
              emit_label (label4);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              expand_inc (quotient, const1_rtx);
              emit_label (label5);
            }
          }
        break;
      case EXACT_DIV_EXPR:
        if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
          {
            HOST_WIDE_INT d = INTVAL (op1);
            unsigned HOST_WIDE_INT ml;
            int pre_shift;
            rtx t1;

            pre_shift = floor_log2 (d & -d);
            ml = invert_mod2n (d >> pre_shift, size);
            t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                               pre_shift, NULL_RTX, unsignedp);
            quotient = expand_mult (compute_mode, t1,
                                    gen_int_mode (ml, compute_mode),
                                    NULL_RTX, 1);

            insn = get_last_insn ();
            set_dst_reg_note (insn, REG_EQUAL,
                              gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
                                              compute_mode, op0, op1),
                              quotient);
          }
        break;
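
        /* Illustration (not part of GCC): the EXACT_DIV_EXPR trick above.
           An odd divisor has a multiplicative inverse modulo 2**32 (what
           invert_mod2n computes), so an exact quotient is a single multiply.
           Sketch for d = 3, whose inverse is 0xaaaaaaab; it is only valid
           when 3 divides x, which is precisely the EXACT_DIV_EXPR contract.
           #if 0 keeps it out of the build.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
exact_div3 (uint32_t x)
{
  /* 3 * 0xaaaaaaab == 1 (mod 2**32).  */
  return x * 0xaaaaaaabu;
}

int
main (void)
{
  uint32_t q;
  for (q = 0; q < 100000; q++)
    assert (exact_div3 (3 * q) == q);
  return 0;
}
#endif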
      case ROUND_DIV_EXPR:
      case ROUND_MOD_EXPR:
        if (unsignedp)
          {
            rtx tem;
            rtx label;
            label = gen_label_rtx ();
            quotient = gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
            if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
              {
                rtx tem;
                quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
                                         quotient, 1, OPTAB_LIB_WIDEN);
                tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
                remainder = expand_binop (compute_mode, sub_optab, op0, tem,
                                          remainder, 1, OPTAB_LIB_WIDEN);
              }
            tem = plus_constant (compute_mode, op1, -1);
            tem = expand_shift (RSHIFT_EXPR, compute_mode, tem, 1, NULL_RTX, 1);
            do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
            expand_inc (quotient, const1_rtx);
            expand_dec (remainder, op1);
            emit_label (label);
          }
        else
          {
            rtx abs_rem, abs_op1, tem, mask;
            rtx label;
            label = gen_label_rtx ();
            quotient = gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
            if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
              {
                rtx tem;
                quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
                                         quotient, 0, OPTAB_LIB_WIDEN);
                tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
                remainder = expand_binop (compute_mode, sub_optab, op0, tem,
                                          remainder, 0, OPTAB_LIB_WIDEN);
              }
            abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
            abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
            tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
                                1, NULL_RTX, 1);
            do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
            tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
                                 size - 1, NULL_RTX, 0);
            tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_inc (quotient, tem);
            tem = expand_binop (compute_mode, xor_optab, mask, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_dec (remainder, tem);
            emit_label (label);
          }
        return gen_lowpart (mode, rem_flag ? remainder : quotient);

      default:
        gcc_unreachable ();
      }
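
  /* Illustration (not part of GCC): the ROUND_DIV fix-up above, in its
     signed form.  After a truncating divmod, the quotient moves one step
     away from zero whenever 2*abs(rem) >= abs(op1).  Minimal sketch with
     an illustrative helper name; #if 0 keeps it out of the build.  */
#if 0
#include <assert.h>
#include <stdlib.h>

static int
round_div (int x, int y)
{
  int q = x / y;
  int r = x % y;
  if (2 * abs (r) >= abs (y))
    q += ((x ^ y) < 0) ? -1 : 1;        /* step in the quotient's direction */
  return q;
}

int
main (void)
{
  assert (round_div (7, 2) == 4);       /* 3.5 rounds away from zero */
  assert (round_div (-7, 2) == -4);
  assert (round_div (5, 3) == 2);
  assert (round_div (-5, 3) == -2);
  assert (round_div (4, 3) == 1);
  return 0;
}
#endif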
  if (quotient == 0)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (rem_flag)
        {
          /* Try to produce the remainder without producing the quotient.
             If we seem to have a divmod pattern that does not require widening,
             don't try widening here.  We should really have a WIDEN argument
             to expand_twoval_binop, since what we'd really like to do here is
             1) try a mod insn in compute_mode
             2) try a divmod insn in compute_mode
             3) try a div insn in compute_mode and multiply-subtract to get
                remainder
             4) try the same things with widening allowed.  */
          remainder
            = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                 op0, op1, target,
                                 unsignedp,
                                 ((optab_handler (optab2, compute_mode)
                                   != CODE_FOR_nothing)
                                  ? OPTAB_DIRECT : OPTAB_WIDEN));
          if (remainder == 0)
            {
              /* No luck there.  Can we do remainder and divide at once
                 without a library call?  */
              remainder = gen_reg_rtx (compute_mode);
              if (! expand_twoval_binop ((unsignedp
                                          ? udivmod_optab
                                          : sdivmod_optab),
                                         op0, op1,
                                         NULL_RTX, remainder, unsignedp))
                remainder = 0;
            }

          if (remainder)
            return gen_lowpart (mode, remainder);
        }

      /* Produce the quotient.  Try a quotient insn, but not a library call.
         If we have a divmod in this mode, use it in preference to widening
         the div (for this test we assume it will not fail).  Note that optab2
         is set to the one of the two optabs that the call below will use.  */
      quotient
        = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
                             op0, op1, rem_flag ? NULL_RTX : target,
                             unsignedp,
                             ((optab_handler (optab2, compute_mode)
                               != CODE_FOR_nothing)
                              ? OPTAB_DIRECT : OPTAB_WIDEN));

      if (quotient == 0)
        {
          /* No luck there.  Try a quotient-and-remainder insn,
             keeping the quotient alone.  */
          quotient = gen_reg_rtx (compute_mode);
          if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
                                     op0, op1,
                                     quotient, NULL_RTX, unsignedp))
            {
              quotient = 0;
              /* Still no luck.  If we are not computing the remainder,
                 use a library call for the quotient.  */
              quotient = sign_expand_binop (compute_mode,
                                            udiv_optab, sdiv_optab,
                                            op0, op1, target,
                                            unsignedp, OPTAB_LIB_WIDEN);
            }
        }
    }

  if (rem_flag)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (quotient == 0)
        {
          /* No divide instruction either.  Use library for remainder.  */
          remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                         op0, op1, target,
                                         unsignedp, OPTAB_LIB_WIDEN);
          /* No remainder function.  Try a quotient-and-remainder
             function, keeping the remainder.  */
          if (remainder == 0)
            {
              remainder = gen_reg_rtx (compute_mode);
              if (!expand_twoval_binop_libfunc
                  (unsignedp ? udivmod_optab : sdivmod_optab,
                   op0, op1,
                   NULL_RTX, remainder,
                   unsignedp ? UMOD : MOD))
                remainder = NULL_RTX;
            }
        }
      else
        {
          /* We divided.  Now finish doing X - Y * (X / Y).  */
          remainder = expand_mult (compute_mode, quotient, op1,
                                   NULL_RTX, unsignedp);
          remainder = expand_binop (compute_mode, sub_optab, op0,
                                    remainder, target, unsignedp,
                                    OPTAB_LIB_WIDEN);
        }
    }

  return gen_lowpart (mode, rem_flag ? remainder : quotient);
}
/* Return a tree node with data type TYPE, describing the value of X.
   Usually this is a VAR_DECL, if there is no obvious better choice.
   X may be an expression, however we only support those expressions
   generated by loop.c.  */

tree
make_tree (tree type, rtx x)
{
  tree t;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      {
        HOST_WIDE_INT hi = 0;

        if (INTVAL (x) < 0
            && !(TYPE_UNSIGNED (type)
                 && (GET_MODE_BITSIZE (TYPE_MODE (type))
                     < HOST_BITS_PER_WIDE_INT)))
          hi = -1;

        t = build_int_cst_wide (type, INTVAL (x), hi);

        return t;
      }

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
        t = build_int_cst_wide (type,
                                CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
      else
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, x);
          t = build_real (type, d);
        }

      return t;

    case CONST_VECTOR:
      {
        int units = CONST_VECTOR_NUNITS (x);
        tree itype = TREE_TYPE (type);
        tree *elts;
        int i;

        /* Build a tree with vector elements.  */
        elts = XALLOCAVEC (tree, units);
        for (i = units - 1; i >= 0; --i)
          {
            rtx elt = CONST_VECTOR_ELT (x, i);
            elts[i] = make_tree (itype, elt);
          }

        return build_vector (type, elts);
      }

    case PLUS:
      return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case MINUS:
      return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case NEG:
      return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));

    case MULT:
      return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case ASHIFT:
      return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case LSHIFTRT:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (type, XEXP (x, 1))));

    case ASHIFTRT:
      t = signed_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (type, XEXP (x, 1))));

    case DIV:
      if (TREE_CODE (type) != REAL_TYPE)
        t = signed_type_for (type);
      else
        t = type;

      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (t, XEXP (x, 1))));
    case UDIV:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (t, XEXP (x, 1))));

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
                                          GET_CODE (x) == ZERO_EXTEND);
      return fold_convert (type, make_tree (t, XEXP (x, 0)));

    case CONST:
      return make_tree (type, XEXP (x, 0));

    case SYMBOL_REF:
      t = SYMBOL_REF_DECL (x);
      if (t)
        return fold_convert (type, build_fold_addr_expr (t));
      /* else fall through.  */

    default:
      t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);

      /* If TYPE is a POINTER_TYPE, we might need to convert X from
         address mode to pointer mode.  */
      if (POINTER_TYPE_P (type))
        x = convert_memory_address_addr_space
              (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));

      /* Note that we do *not* use SET_DECL_RTL here, because we do not
         want set_decl_rtl to go adjusting REG_ATTRS for this temporary.  */
      t->decl_with_rtl.rtl = x;

      return t;
    }
}
/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */

rtx
expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
{
  rtx tem = 0;

  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
    tem = simplify_binary_operation (AND, mode, op0, op1);
  if (tem == 0)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}
/* Helper function for emit_store_flag.  */

static rtx
emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
             enum machine_mode mode, enum machine_mode compare_mode,
             int unsignedp, rtx x, rtx y, int normalizep,
             enum machine_mode target_mode)
{
  struct expand_operand ops[4];
  rtx op0, last, comparison, subtarget;
  enum machine_mode result_mode = insn_data[(int) icode].operand[0].mode;

  last = get_last_insn ();
  x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
  y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
  if (!x || !y)
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  if (target_mode == VOIDmode)
    target_mode = result_mode;
  if (!target)
    target = gen_reg_rtx (target_mode);

  comparison = gen_rtx_fmt_ee (code, result_mode, x, y);

  create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], x);
  create_fixed_operand (&ops[3], y);
  if (!maybe_expand_insn (icode, 4, ops))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }
  subtarget = ops[0].value;

  /* If we are converting to a wider mode, first convert to
     TARGET_MODE, then normalize.  This produces better combining
     opportunities on machines that have a SIGN_EXTRACT when we are
     testing a single bit.  This mostly benefits the 68k.

     If STORE_FLAG_VALUE does not have the sign bit set when
     interpreted in MODE, we can do this conversion as unsigned, which
     is usually more efficient.  */
  if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
    {
      convert_move (target, subtarget,
                    val_signbit_known_clear_p (result_mode,
                                               STORE_FLAG_VALUE));
      op0 = target;
      result_mode = target_mode;
    }
  else
    op0 = subtarget;

  /* If we want to keep subexpressions around, don't reuse our last
     target.  */
  if (optimize)
    subtarget = 0;

  /* Now normalize to the proper value in MODE.  Sometimes we don't
     have to do anything.  */
  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
    ;
  /* STORE_FLAG_VALUE might be the most negative number, so write
     the comparison this way to avoid a compile-time warning.  */
  else if (- normalizep == STORE_FLAG_VALUE)
    op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);

  /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
     it hard to use a value of just the sign bit due to ANSI integer
     constant typing rules.  */
  else if (val_signbit_known_set_p (result_mode, STORE_FLAG_VALUE))
    op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
                        GET_MODE_BITSIZE (result_mode) - 1, subtarget,
                        normalizep == 1);
  else
    {
      gcc_assert (STORE_FLAG_VALUE & 1);

      op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
      if (normalizep == -1)
        op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
    }

  /* If we were converting to a smaller mode, do the conversion now.  */
  if (target_mode != result_mode)
    {
      convert_move (target, op0, 0);
      return target;
    }
  else
    return op0;
}
/* A subroutine of emit_store_flag only including "tricks" that do not
   need a recursive call.  These are kept separate to avoid infinite
   loops.  */

static rtx
emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
                   enum machine_mode mode, int unsignedp, int normalizep,
                   enum machine_mode target_mode)
{
  rtx subtarget;
  enum insn_code icode;
  enum machine_mode compare_mode;
  enum mode_class mclass;
  enum rtx_code scode;
  rtx tem;

  if (unsignedp)
    code = unsigned_condition (code);
  scode = swap_condition (code);

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op1;
      op1 = op0;
      op0 = tem;
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }

  /* If we are comparing a double-word integer with zero or -1, we can
     convert the comparison into one involving a single word.  */
  if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
      && GET_MODE_CLASS (mode) == MODE_INT
      && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
    {
      if ((code == EQ || code == NE)
          && (op1 == const0_rtx || op1 == constm1_rtx))
        {
          rtx op00, op01;

          /* Do a logical OR or AND of the two words and compare the
             result.  */
          op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
          op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
          tem = expand_binop (word_mode,
                              op1 == const0_rtx ? ior_optab : and_optab,
                              op00, op01, NULL_RTX, unsignedp,
                              OPTAB_DIRECT);

          if (tem != 0)
            tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
                                   unsignedp, normalizep);
        }
      else if ((code == LT || code == GE) && op1 == const0_rtx)
        {
          rtx op0h;

          /* If testing the sign bit, can just test on high word.  */
          op0h = simplify_gen_subreg (word_mode, op0, mode,
                                      subreg_highpart_offset (word_mode,
                                                              mode));
          tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
                                 unsignedp, normalizep);
        }
      else
        tem = NULL_RTX;

      if (tem)
        {
          if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
            return tem;
          if (!target)
            target = gen_reg_rtx (target_mode);

          convert_move (target, tem,
                        !val_signbit_known_set_p (word_mode,
                                                  (normalizep ? normalizep
                                                   : STORE_FLAG_VALUE)));
          return target;
        }
    }
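
  /* Illustration (not part of GCC): the word-OR trick above.  A double-word
     equality test against zero needs no double-word compare; OR the halves
     and test the single-word result, and a sign test needs only the high
     word.  Sketch for a 64-bit value on a hypothetical 32-bit-word target;
     names are illustrative.  #if 0 keeps it out of the build.  */
#if 0
#include <assert.h>
#include <stdint.h>

static int
dw_eq0 (uint32_t lo, uint32_t hi)
{
  return (lo | hi) == 0;                /* EQ against const0_rtx */
}

static int
dw_lt0 (uint32_t lo, uint32_t hi)
{
  (void) lo;                            /* sign lives in the high word only */
  return (int32_t) hi < 0;
}

int
main (void)
{
  assert (dw_eq0 (0, 0) && !dw_eq0 (1, 0) && !dw_eq0 (0, 1));
  assert (dw_lt0 (0, 0x80000000u) && !dw_lt0 (0xffffffffu, 0));
  return 0;
}
#endif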
  /* If this is A < 0 or A >= 0, we can do this by taking the ones'
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
          || val_signbit_p (mode, STORE_FLAG_VALUE)))
    {
      subtarget = target;

      if (!target)
        target_mode = mode;

      /* If the result is to be wider than OP0, it is best to convert it
         first.  If it is to be narrower, it is *incorrect* to convert it
         first.  */
      else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
        {
          op0 = convert_modes (target_mode, mode, op0, 0);
          mode = target_mode;
        }

      if (target_mode != mode)
        subtarget = 0;

      if (code == GE)
        op0 = expand_unop (mode, one_cmpl_optab, op0,
                           ((STORE_FLAG_VALUE == 1 || normalizep)
                            ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
        /* If we are supposed to produce a 0/1 value, we want to do
           a logical shift from the sign bit to the low-order bit; for
           a -1/0 value, we do an arithmetic shift.  */
        op0 = expand_shift (RSHIFT_EXPR, mode, op0,
                            GET_MODE_BITSIZE (mode) - 1,
                            subtarget, normalizep != -1);

      if (mode != target_mode)
        op0 = convert_modes (target_mode, mode, op0, 0);

      return op0;
    }
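
  /* Illustration (not part of GCC): the shift form of A < 0 / A >= 0 built
     above.  A logical shift moves the sign bit down to bit 0 for a 0/1
     result; for GE the operand is complemented first.  Sketch assuming a
     32-bit operand, with illustrative names; #if 0 keeps it out of the
     build.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
lt0 (int32_t x)
{
  return (uint32_t) x >> 31;            /* 1 iff the sign bit is set */
}

static uint32_t
ge0 (int32_t x)
{
  return (uint32_t) ~x >> 31;           /* one's complement, then shift */
}

int
main (void)
{
  assert (lt0 (-5) == 1 && lt0 (5) == 0 && lt0 (0) == 0);
  assert (ge0 (-5) == 0 && ge0 (5) == 1 && ge0 (0) == 1);
  return 0;
}
#endif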
  mclass = GET_MODE_CLASS (mode);
  for (compare_mode = mode; compare_mode != VOIDmode;
       compare_mode = GET_MODE_WIDER_MODE (compare_mode))
    {
      enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
      icode = optab_handler (cstore_optab, optab_mode);
      if (icode != CODE_FOR_nothing)
        {
          do_pending_stack_adjust ();
          tem = emit_cstore (target, icode, code, mode, compare_mode,
                             unsignedp, op0, op1, normalizep, target_mode);
          if (tem)
            return tem;

          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              tem = emit_cstore (target, icode, scode, mode, compare_mode,
                                 unsignedp, op1, op0, normalizep, target_mode);
              if (tem)
                return tem;
            }
          break;
        }
    }

  return 0;
}
/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  Normalize is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */

rtx
emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
                 enum machine_mode mode, int unsignedp, int normalizep)
{
  enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
  enum rtx_code rcode;
  rtx subtarget;
  rtx tem, last, trueval;

  tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
                           target_mode);
  if (tem)
    return tem;

  /* If we reached here, we can't do this with a scc insn, however there
     are some comparisons that can be done in other ways.  Don't do any
     of these cases if branches are very cheap.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
    return 0;

  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
        normalizep = STORE_FLAG_VALUE;

      else if (val_signbit_p (mode, STORE_FLAG_VALUE))
        ;
      else
        return 0;
    }

  last = get_last_insn ();

  /* If optimizing, use different pseudo registers for each insn, instead
     of reusing the same pseudo.  This leads to better CSE, but slows
     down the compiler, since there are more pseudos.  */
  subtarget = (!optimize
               && (target_mode == mode)) ? target : NULL_RTX;
  trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);

  /* For floating-point comparisons, try the reverse comparison or try
     changing the "orderedness" of the comparison.  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      enum rtx_code first_code;
      bool and_them;

      rcode = reverse_condition_maybe_unordered (code);
      if (can_compare_p (rcode, mode, ccp_store_flag)
          && (code == ORDERED || code == UNORDERED
              || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
              || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
        {
          int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
                          || (STORE_FLAG_VALUE == -1 && normalizep == 1));

          /* For the reverse comparison, use either an addition or a XOR.  */
          if (want_add
              && rtx_cost (GEN_INT (normalizep), PLUS, 1,
                           optimize_insn_for_speed_p ()) == 0)
            {
              tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                       STORE_FLAG_VALUE, target_mode);
              if (tem)
                return expand_binop (target_mode, add_optab, tem,
                                     GEN_INT (normalizep),
                                     target, 0, OPTAB_WIDEN);
            }
          else if (!want_add
                   && rtx_cost (trueval, XOR, 1,
                                optimize_insn_for_speed_p ()) == 0)
            {
              tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                       normalizep, target_mode);
              if (tem)
                return expand_binop (target_mode, xor_optab, tem, trueval,
                                     target, INTVAL (trueval) >= 0, OPTAB_WIDEN);
            }
        }

      delete_insns_since (last);

      /* Cannot split ORDERED and UNORDERED, only try the above trick.  */
      if (code == ORDERED || code == UNORDERED)
        return 0;

      and_them = split_comparison (code, mode, &first_code, &code);

      /* If there are no NaNs, the first comparison should always fall through.
         Effectively change the comparison to the other one.  */
      if (!HONOR_NANS (mode))
        {
          gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
          return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
                                    target_mode);
        }

#ifdef HAVE_conditional_move
      /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
         conditional move.  */
      tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
                               normalizep, target_mode);
      if (tem == 0)
        return 0;

      if (and_them)
        tem = emit_conditional_move (target, code, op0, op1, mode,
                                     tem, const0_rtx, GET_MODE (tem), 0);
      else
        tem = emit_conditional_move (target, code, op0, op1, mode,
                                     trueval, tem, GET_MODE (tem), 0);

      if (tem == 0)
        delete_insns_since (last);
      return tem;
#else
      return 0;
#endif
    }

  /* The remaining tricks only apply to integer comparisons.  */

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return 0;

  /* If this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if ((code == EQ || code == NE) && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
                          OPTAB_WIDEN);

      if (tem == 0)
        tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
                            OPTAB_WIDEN);
      if (tem != 0)
        tem = emit_store_flag (target, code, tem, const0_rtx,
                               mode, unsignedp, normalizep);
      if (tem != 0)
        return tem;

      delete_insns_since (last);
    }

  /* For integer comparisons, try the reverse comparison.  However, for
     small X and if we'd have anyway to extend, implementing "X != 0"
     as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0".  */
  rcode = reverse_condition (code);
  if (can_compare_p (rcode, mode, ccp_store_flag)
      && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
            && code == NE
            && GET_MODE_SIZE (mode) < UNITS_PER_WORD
            && op1 == const0_rtx))
    {
      int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
                      || (STORE_FLAG_VALUE == -1 && normalizep == 1));

      /* Again, for the reverse comparison, use either an addition or a XOR.  */
      if (want_add
          && rtx_cost (GEN_INT (normalizep), PLUS, 1,
                       optimize_insn_for_speed_p ()) == 0)
        {
          tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                   STORE_FLAG_VALUE, target_mode);
          if (tem != 0)
            tem = expand_binop (target_mode, add_optab, tem,
                                GEN_INT (normalizep), target, 0, OPTAB_WIDEN);
        }
      else if (!want_add
               && rtx_cost (trueval, XOR, 1,
                            optimize_insn_for_speed_p ()) == 0)
        {
          tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                   normalizep, target_mode);
          if (tem != 0)
            tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
                                INTVAL (trueval) >= 0, OPTAB_WIDEN);
        }

      if (tem != 0)
        return tem;
      delete_insns_since (last);
    }

  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (op1 != const0_rtx
      || (code != EQ && code != NE
          && (BRANCH_COST (optimize_insn_for_speed_p (),
                           false) <= 1 || (code != LE && code != GT))))
    return 0;

  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
                          OPTAB_WIDEN);
      if (tem)
        tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
                            OPTAB_WIDEN);
    }

  /* To see if A > 0, compute ((signed) A >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
                          GET_MODE_BITSIZE (mode) - 1,
                          subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
                          OPTAB_WIDEN);
    }

  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
         that converts the operand into a positive number if it is nonzero
         or zero if it was originally zero.  Then, for EQ, we subtract 1 and
         for NE we negate.  This puts the result in the sign bit.  Then we
         normalize with a shift, if needed.

         Two operations that can do the above actions are ABS and FFS, so try
         them.  If that doesn't work, and MODE is smaller than a full word,
         we can use zero-extension to the wider mode (an unsigned conversion)
         as the operation.  */

      /* Note that ABS doesn't yield a positive number for INT_MIN, but
         that is compensated by the subsequent overflow when subtracting
         one / negating.  */

      if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
        tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
        tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
        {
          tem = convert_modes (word_mode, mode, op0, 1);
          mode = word_mode;
        }

      if (tem != 0)
        {
          if (code == EQ)
            tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
                                0, OPTAB_WIDEN);
          else
            tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
        }

      /* If we couldn't do it that way, for NE we can "or" the two's complement
         of the value with itself.  For EQ, we take the one's complement of
         that "or", which is an extra insn, so we only handle EQ if branches
         are expensive.  */

      if (tem == 0
          && (code == NE
              || BRANCH_COST (optimize_insn_for_speed_p (),
                              false) > 1))
        {
          if (rtx_equal_p (subtarget, op0))
            subtarget = 0;

          tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
          tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
                              OPTAB_WIDEN);

          if (tem && code == EQ)
            tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
        }
    }

  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
                        GET_MODE_BITSIZE (mode) - 1,
                        subtarget, normalizep == 1);

  if (tem)
    {
      if (!target)
        ;
      else if (GET_MODE (tem) != target_mode)
        {
          convert_move (target, tem, 0);
          tem = target;
        }
      else if (!subtarget)
        {
          emit_move_insn (target, tem);
          tem = target;
        }
    }
  else
    delete_insns_since (last);

  return tem;
}
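
/* Illustration (not part of GCC): the two zero-comparison tricks used
   above.  A <= 0 iff (A | (A - 1)) has its sign bit set (A - 1 wraps to
   -1 when A is 0); A > 0 iff (A >> 31) - A has its sign bit set.  Sketch
   assuming 32-bit int and an arithmetic right shift, with illustrative
   names; the ranges avoid the INT_MIN overflow the RTL forms tolerate.
   #if 0 keeps it out of the build.  */
#if 0
#include <assert.h>
#include <stdint.h>

static int
le0 (int32_t a)
{
  return (uint32_t) (a | (a - 1)) >> 31;
}

static int
gt0 (int32_t a)
{
  return (uint32_t) ((a >> 31) - a) >> 31;
}

int
main (void)
{
  int32_t a;
  for (a = -100; a <= 100; a++)
    {
      assert (le0 (a) == (a <= 0));
      assert (gt0 (a) == (a > 0));
    }
  return 0;
}
#endif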
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
                       enum machine_mode mode, int unsignedp, int normalizep)
{
  rtx tem, label;
  rtx trueval, falseval;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (!target)
    target = gen_reg_rtx (word_mode);

  /* If this failed, we have to do this with set/compare/jump/set code.
     For foo != 0, if foo is in OP0, just replace it with 1 if nonzero.  */
  trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
  if (code == NE
      && GET_MODE_CLASS (mode) == MODE_INT
      && REG_P (target)
      && op0 == target
      && op1 == const0_rtx)
    {
      label = gen_label_rtx ();
      do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
                               mode, NULL_RTX, NULL_RTX, label, -1);
      emit_move_insn (target, trueval);
      emit_label (label);
      return target;
    }

  if (!REG_P (target)
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  /* Jump in the right direction if the target cannot implement CODE
     but can jump on its reverse condition.  */
  falseval = const0_rtx;
  if (! can_compare_p (code, mode, ccp_jump)
      && (! FLOAT_MODE_P (mode)
          || code == ORDERED || code == UNORDERED
          || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
          || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
    {
      enum rtx_code rcode;
      if (FLOAT_MODE_P (mode))
        rcode = reverse_condition_maybe_unordered (code);
      else
        rcode = reverse_condition (code);

      /* Canonicalize to UNORDERED for the libcall.  */
      if (can_compare_p (rcode, mode, ccp_jump)
          || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
        {
          falseval = trueval;
          trueval = const0_rtx;
          code = rcode;
        }
    }

  emit_move_insn (target, trueval);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
                           NULL_RTX, label, -1);

  emit_move_insn (target, falseval);
  emit_label (label);

  return target;
}

/* Perform possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 is true where ARG1 and ARG2 are of mode MODE.  This is
   now a thin wrapper around do_compare_rtx_and_jump.  */

static void
do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
                 rtx label)
{
  int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
  do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
                           NULL_RTX, NULL_RTX, label, -1);
}