/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "langhooks.h"
#include "df.h"
#include "target.h"
#include "expmed.h"
struct target_expmed default_target_expmed;
#if SWITCHABLE_TARGET
struct target_expmed *this_target_expmed = &default_target_expmed;
#endif
static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int, bool);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
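
/* Worked example (illustrative, not from the original source): x & (x - 1)
   clears the lowest set bit of x, so the result is zero exactly when x has
   at most one bit set.  For x = 8 (binary 1000), 8 & 7 = 0, so 8 passes;
   for x = 6 (binary 110), 6 & 5 = 100b != 0, so 6 fails; x = 0 also yields
   0, which is why the macro name says OR_ZERO.  */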
#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
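
/* Illustrative note (an assumption about typical targets, not taken from
   this file): on a strict-alignment target such as classic MIPS the default
   above makes SLOW_UNALIGNED_ACCESS nonzero, steering the code below toward
   splitting unaligned fields into narrower accesses, while on x86
   STRICT_ALIGNMENT is 0 and unaligned field accesses are emitted directly.  */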
/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv	0
#define CODE_FOR_insv	CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv	0
#define CODE_FOR_extv	CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv	0
#define CODE_FOR_extzv	CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

void
init_expmed (void)
{
  struct
  {
    struct rtx_def reg;		rtunion reg_fld[2];
    struct rtx_def plus;	rtunion plus_fld1;
    struct rtx_def neg;
    struct rtx_def mult;	rtunion mult_fld1;
    struct rtx_def sdiv;	rtunion sdiv_fld1;
    struct rtx_def udiv;	rtunion udiv_fld1;
    struct rtx_def zext;
    struct rtx_def sdiv_32;	rtunion sdiv_32_fld1;
    struct rtx_def smod_32;	rtunion smod_32_fld1;
    struct rtx_def wide_mult;	rtunion wide_mult_fld1;
    struct rtx_def wide_lshr;	rtunion wide_lshr_fld1;
    struct rtx_def wide_trunc;
    struct rtx_def shift;	rtunion shift_fld1;
    struct rtx_def shift_mult;	rtunion shift_mult_fld1;
    struct rtx_def shift_add;	rtunion shift_add_fld1;
    struct rtx_def shift_sub0;	rtunion shift_sub0_fld1;
    struct rtx_def shift_sub1;	rtunion shift_sub1_fld1;
  } all;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
  int m, n;
  enum machine_mode mode, wider_mode;
  int speed;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      cint[m] = GEN_INT (m);
    }

  memset (&all, 0, sizeof all);

  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.sdiv, DIV);
  XEXP (&all.sdiv, 0) = &all.reg;
  XEXP (&all.sdiv, 1) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.sdiv_32, DIV);
  XEXP (&all.sdiv_32, 0) = &all.reg;
  XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);

  PUT_CODE (&all.smod_32, MOD);
  XEXP (&all.smod_32, 0) = &all.reg;
  XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub0, MINUS);
  XEXP (&all.shift_sub0, 0) = &all.shift_mult;
  XEXP (&all.shift_sub0, 1) = &all.reg;

  PUT_CODE (&all.shift_sub1, MINUS);
  XEXP (&all.shift_sub1, 0) = &all.reg;
  XEXP (&all.shift_sub1, 1) = &all.shift_mult;

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      zero_cost[speed] = rtx_cost (const0_rtx, SET, speed);

      for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
	   mode != VOIDmode;
	   mode = GET_MODE_WIDER_MODE (mode))
	{
	  PUT_MODE (&all.reg, mode);
	  PUT_MODE (&all.plus, mode);
	  PUT_MODE (&all.neg, mode);
	  PUT_MODE (&all.mult, mode);
	  PUT_MODE (&all.sdiv, mode);
	  PUT_MODE (&all.udiv, mode);
	  PUT_MODE (&all.sdiv_32, mode);
	  PUT_MODE (&all.smod_32, mode);
	  PUT_MODE (&all.wide_trunc, mode);
	  PUT_MODE (&all.shift, mode);
	  PUT_MODE (&all.shift_mult, mode);
	  PUT_MODE (&all.shift_add, mode);
	  PUT_MODE (&all.shift_sub0, mode);
	  PUT_MODE (&all.shift_sub1, mode);

	  add_cost[speed][mode] = rtx_cost (&all.plus, SET, speed);
	  neg_cost[speed][mode] = rtx_cost (&all.neg, SET, speed);
	  mul_cost[speed][mode] = rtx_cost (&all.mult, SET, speed);
	  sdiv_cost[speed][mode] = rtx_cost (&all.sdiv, SET, speed);
	  udiv_cost[speed][mode] = rtx_cost (&all.udiv, SET, speed);

	  sdiv_pow2_cheap[speed][mode] = (rtx_cost (&all.sdiv_32, SET, speed)
					  <= 2 * add_cost[speed][mode]);
	  smod_pow2_cheap[speed][mode] = (rtx_cost (&all.smod_32, SET, speed)
					  <= 4 * add_cost[speed][mode]);

	  wider_mode = GET_MODE_WIDER_MODE (mode);
	  if (wider_mode != VOIDmode)
	    {
	      PUT_MODE (&all.zext, wider_mode);
	      PUT_MODE (&all.wide_mult, wider_mode);
	      PUT_MODE (&all.wide_lshr, wider_mode);
	      XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));

	      mul_widen_cost[speed][wider_mode]
		= rtx_cost (&all.wide_mult, SET, speed);
	      mul_highpart_cost[speed][mode]
		= rtx_cost (&all.wide_trunc, SET, speed);
	    }

	  shift_cost[speed][mode][0] = 0;
	  shiftadd_cost[speed][mode][0] = shiftsub0_cost[speed][mode][0]
	    = shiftsub1_cost[speed][mode][0] = add_cost[speed][mode];

	  n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
	  for (m = 1; m < n; m++)
	    {
	      XEXP (&all.shift, 1) = cint[m];
	      XEXP (&all.shift_mult, 1) = pow2[m];

	      shift_cost[speed][mode][m] = rtx_cost (&all.shift, SET, speed);
	      shiftadd_cost[speed][mode][m] = rtx_cost (&all.shift_add, SET, speed);
	      shiftsub0_cost[speed][mode][m] = rtx_cost (&all.shift_sub0, SET, speed);
	      shiftsub1_cost[speed][mode][m] = rtx_cost (&all.shift_sub1, SET, speed);
	    }
	}
    }

  if (alg_hash_used_p)
    memset (alg_hash, 0, sizeof (alg_hash));
  else
    alg_hash_used_p = true;

  default_rtl_profile ();
}
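
/* Usage sketch (drawn from how these cost tables are consumed elsewhere in
   GCC, e.g. by synth_mult/expand_mult; not part of this function): with the
   tables filled in, expansion can compare
       shift_cost[speed][SImode][3] + add_cost[speed][SImode]
   against mul_cost[speed][SImode] and emit x * 9 as (x << 3) + x when the
   shift-add sequence is the cheaper one.  */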

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data_d *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
	{
	  data = &insn_data[CODE_FOR_insv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
	{
	  data = &insn_data[CODE_FOR_extv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
	{
	  data = &insn_data[CODE_FOR_extzv];
	  break;
	}
      return MAX_MACHINE_MODE;

    default:
      gcc_unreachable ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}

/* Return true if X, of mode MODE, matches the predicate for operand
   OPNO of instruction ICODE.  Allow volatile memories, regardless of
   the ambient volatile_ok setting.  */

static bool
check_predicate_volatile_ok (enum insn_code icode, int opno,
			     rtx x, enum machine_mode mode)
{
  bool save_volatile_ok, result;

  save_volatile_ok = volatile_ok;
  volatile_ok = true;
  result = insn_data[(int) icode].operand[opno].predicate (x, mode);
  volatile_ok = save_volatile_ok;
  return result;
}

/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		   unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
		   rtx value, bool fallback_p)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  int byte_offset;
  rtx orig_value;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
      int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));

      byte_offset = 0;

      /* Paradoxical subregs need special handling on big endian machines.  */
      if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
	{
	  int difference = inner_mode_size - outer_mode_size;

	  if (WORDS_BIG_ENDIAN)
	    byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    byte_offset += difference % UNITS_PER_WORD;
	}
      else
	byte_offset = SUBREG_BYTE (op0);

      bitnum += byte_offset * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return true;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) optab_handler (vec_set_optab, outermode);
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
	src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
	rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
		  && (*insn_data[icode].operand[1].predicate) (src, mode1)
		  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
	{
	  emit_insn (seq);
	  emit_insn (pat);
	  return true;
	}
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  offset = bitnum / unit;
  bitpos = bitnum % unit;
  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
		+ (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (!MEM_P (op0)
	  ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
	      || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
	     && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
	  : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
	     || (offset * BITS_PER_UNIT % bitsize == 0
		 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (MEM_P (op0))
	op0 = adjust_address (op0, fieldmode, offset);
      else if (GET_MODE (op0) != fieldmode)
	op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
				   byte_offset);
      emit_move_insn (op0, value);
      return true;
    }
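
  /* Worked example (illustrative): storing an SImode value at bitnum 64 of
     a word-aligned MEM gives bitpos == 0 and offset == 8 bytes, so the test
     above succeeds and the store becomes a single SImode move to
     (mem:SI (plus addr 8)) -- no masking or shifting is emitted.  */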

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (MEM_P (op0))
	  op0 = adjust_address (op0, imode, 0);
	else
	  {
	    gcc_assert (imode != BLKmode);
	    op0 = gen_lowpart (imode, op0);
	  }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (!MEM_P (op0)
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
    {
      int icode = optab_handler (movstrict_optab, fieldmode);
      rtx insn;
      rtx start = get_last_insn ();
      rtx arg0 = op0;

      /* Get appropriate low part of the value being stored.  */
      if (CONST_INT_P (value) || REG_P (value))
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
	value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
	{
	  /* Else we've got some float mode source being extracted into
	     a different float mode destination -- this combination of
	     subregs results in Severe Tire Damage.  */
	  gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
		      || GET_MODE_CLASS (fieldmode) == MODE_INT
		      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
	  arg0 = SUBREG_REG (op0);
	}

      insn = (GEN_FCN (icode)
	      (gen_rtx_SUBREG (fieldmode, arg0,
			       (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
			       + (offset * UNITS_PER_WORD)),
	       value));
      if (insn)
	{
	  emit_insn (insn);
	  return true;
	}
      delete_insns_since (start);
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force
	 will result in an abort.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
	fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  unsigned int wordnum = (backwards ? nwords - i - 1 : i);
	  unsigned int bit_offset = (backwards
				     ? MAX ((int) bitsize - ((int) i + 1)
					    * BITS_PER_WORD,
					    0)
				     : (int) i * BITS_PER_WORD);
	  rtx value_word = operand_subword_force (value, wordnum, fieldmode);

	  if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
					    bitsize - i * BITS_PER_WORD),
				  bitnum + bit_offset, word_mode,
				  value_word, fallback_p))
	    {
	      delete_insns_since (last);
	      return false;
	    }
	}
      return true;
    }
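
  /* Worked example (illustrative): on a 32-bit little-endian target a
     70-bit field takes nwords == 3 iterations; i == 0 stores bits 0-31,
     i == 1 stores bits 32-63, and i == 2 stores the remaining
     MIN (32, 70 - 64) == 6 bits at bit_offset 64.  */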

  /* From here on we can assume that the field to be stored in is
     not a full-word (whatever type that is), since it is shorter than
     a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (!MEM_P (op0))
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (!REG_P (op0))
	    {
	      /* Since this is a destination (lvalue), we can't copy
		 it to a pseudo.  We can remove a SUBREG that does not
		 change the size of the operand.  Such a SUBREG may
		 have been added above.  */
	      gcc_assert (GET_CODE (op0) == SUBREG
			  && (GET_MODE_SIZE (GET_MODE (op0))
			      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
	      op0 = SUBREG_REG (op0);
	    }
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */

  orig_value = value;
  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && bitsize > 0
      && GET_MODE_BITSIZE (op_mode) >= bitsize
      && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
      && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
							VOIDmode)
      && check_predicate_volatile_ok (CODE_FOR_insv, 0, op0, VOIDmode))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      bool copy_back = false;

      /* Add OFFSET into OP0's address.  */
      if (MEM_P (xop0))
	xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in OP_MODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
	xop0 = gen_lowpart_SUBREG (op_mode, xop0);

      /* If the destination is a paradoxical subreg such that we need a
	 truncate to the inner mode, perform the insertion on a temporary and
	 truncate the result to the original destination.  Note that we can't
	 just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
	 X) 0)) is (reg:N X).  */
      if (GET_CODE (xop0) == SUBREG
	  && REG_P (SUBREG_REG (xop0))
	  && (!TRULY_NOOP_TRUNCATION
	      (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (xop0))),
	       GET_MODE_BITSIZE (op_mode))))
	{
	  rtx tem = gen_reg_rtx (op_mode);
	  emit_move_insn (tem, xop0);
	  xop0 = tem;
	  copy_back = true;
	}

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
	xbitpos += GET_MODE_BITSIZE (op_mode) - unit;

      unit = GET_MODE_BITSIZE (op_mode);
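
      /* Worked example (illustrative): with unit == 32, bitsize == 8 and
	 xbitpos == 4, a machine whose insv counts bits from the opposite
	 end gets xbitpos = 32 - 8 - 4 = 20 -- the same 8 bits, numbered
	 from the other direction.  */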

      /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != op_mode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
		{
		  rtx tmp;

		  tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
		  if (! tmp)
		    tmp = simplify_gen_subreg (op_mode,
					       force_reg (GET_MODE (value),
							  value1),
					       GET_MODE (value), 0);
		  value1 = tmp;
		}
	      else
		value1 = gen_lowpart (op_mode, value1);
	    }
	  else if (CONST_INT_P (value))
	    value1 = gen_int_mode (INTVAL (value), op_mode);
	  else
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    gcc_assert (CONSTANT_P (value));
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
	     (value1, op_mode)))
	value1 = force_reg (op_mode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	{
	  emit_insn (pat);

	  if (copy_back)
	    convert_move (op0, xop0, true);
	  return true;
	}
      delete_insns_since (last);
    }

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (HAVE_insv && MEM_P (op0))
    {
      enum machine_mode bestmode;

      /* Get the mode to use for inserting into this field.  If OP0 is
	 BLKmode, get the smallest mode consistent with the alignment.  If
	 OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
	 mode.  Otherwise, use the smallest mode containing the field.  */

      if (GET_MODE (op0) == BLKmode
	  || (op_mode != MAX_MACHINE_MODE
	      && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
	bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
				  (op_mode == MAX_MACHINE_MODE
				   ? VOIDmode : op_mode),
				  MEM_VOLATILE_P (op0));
      else
	bestmode = GET_MODE (op0);

      if (bestmode != VOIDmode
	  && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
	  && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
	       && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
	{
	  rtx last, tempreg, xop0;
	  unsigned HOST_WIDE_INT xoffset, xbitpos;

	  last = get_last_insn ();

	  /* Adjust address to point to the containing unit of
	     that mode.  Compute the offset as a multiple of this unit,
	     counting in bytes.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  xbitpos = bitnum % unit;
	  xop0 = adjust_address (op0, bestmode, xoffset);

	  /* Fetch that unit, store the bitfield in it, then store
	     the unit.  */
	  tempreg = copy_to_reg (xop0);
	  if (store_bit_field_1 (tempreg, bitsize, xbitpos,
				 fieldmode, orig_value, false))
	    {
	      emit_move_insn (xop0, tempreg);
	      return true;
	    }
	  delete_insns_since (last);
	}
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return true;
}

/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.  */

void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
		 rtx value)
{
  if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true))
    gcc_unreachable ();
}

/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
		       unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    {
      gcc_assert (!offset);
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos, value);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.
	 We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
	mode = word_mode;

      if (MEM_VOLATILE_P (op0)
	  && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
	  && flag_strict_volatile_bitfields > 0)
	mode = GET_MODE (op0);
      else
	mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			      MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0, bitsize,
				 bitpos + offset * BITS_PER_UNIT, value);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (CONST_INT_P (value))
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	value = convert_to_mode (mode, value, 1);

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, temp,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    {
      op0 = copy_rtx (op0);
      emit_move_insn (op0, temp);
    }
}
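
/* Worked example (illustrative): storing the constant 5 into a 3-bit field
   at bitpos 4 of a 32-bit datum amounts to
       temp = op0 & ~(7 << 4);    AND with mask_rtx (mode, 4, 3, 1)
       temp = temp | (5 << 4);    IOR with lshift_value (mode, 5, 4, 3)
   where the AND is skipped when the constant is all-ones (all_one) and the
   IOR is skipped when it is zero (all_zero).  */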

/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && !CONST_INT_P (value))
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }
< bitsize
)
1048 unsigned HOST_WIDE_INT thissize
;
1050 unsigned HOST_WIDE_INT thispos
;
1051 unsigned HOST_WIDE_INT offset
;
1053 offset
= (bitpos
+ bitsdone
) / unit
;
1054 thispos
= (bitpos
+ bitsdone
) % unit
;
1056 /* THISSIZE must not overrun a word boundary. Otherwise,
1057 store_fixed_bit_field will call us again, and we will mutually
1059 thissize
= MIN (bitsize
- bitsdone
, BITS_PER_WORD
);
1060 thissize
= MIN (thissize
, unit
- thispos
);
1062 if (BYTES_BIG_ENDIAN
)
1066 /* We must do an endian conversion exactly the same way as it is
1067 done in extract_bit_field, so that the two calls to
1068 extract_fixed_bit_field will have comparable arguments. */
1069 if (!MEM_P (value
) || GET_MODE (value
) == BLKmode
)
1070 total_bits
= BITS_PER_WORD
;
1072 total_bits
= GET_MODE_BITSIZE (GET_MODE (value
));
1074 /* Fetch successively less significant portions. */
1075 if (CONST_INT_P (value
))
1076 part
= GEN_INT (((unsigned HOST_WIDE_INT
) (INTVAL (value
))
1077 >> (bitsize
- bitsdone
- thissize
))
1078 & (((HOST_WIDE_INT
) 1 << thissize
) - 1));
1080 /* The args are chosen so that the last part includes the
1081 lsb. Give extract_bit_field the value it needs (with
1082 endianness compensation) to fetch the piece we want. */
1083 part
= extract_fixed_bit_field (word_mode
, value
, 0, thissize
,
1084 total_bits
- bitsize
+ bitsdone
,
1085 NULL_RTX
, 1, false);
1089 /* Fetch successively more significant portions. */
1090 if (CONST_INT_P (value
))
1091 part
= GEN_INT (((unsigned HOST_WIDE_INT
) (INTVAL (value
))
1093 & (((HOST_WIDE_INT
) 1 << thissize
) - 1));
1095 part
= extract_fixed_bit_field (word_mode
, value
, 0, thissize
,
1096 bitsdone
, NULL_RTX
, 1, false);
1099 /* If OP0 is a register, then handle OFFSET here.
1101 When handling multiword bitfields, extract_bit_field may pass
1102 down a word_mode SUBREG of a larger REG for a bitfield that actually
1103 crosses a word boundary. Thus, for a SUBREG, we must find
1104 the current word starting from the base register. */
1105 if (GET_CODE (op0
) == SUBREG
)
1107 int word_offset
= (SUBREG_BYTE (op0
) / UNITS_PER_WORD
) + offset
;
1108 word
= operand_subword_force (SUBREG_REG (op0
), word_offset
,
1109 GET_MODE (SUBREG_REG (op0
)));
1112 else if (REG_P (op0
))
1114 word
= operand_subword_force (op0
, offset
, GET_MODE (op0
));
1120 /* OFFSET is in UNITs, and UNIT is in bits.
1121 store_fixed_bit_field wants offset in bytes. */
1122 store_fixed_bit_field (word
, offset
* unit
/ BITS_PER_UNIT
, thissize
,
1124 bitsdone
+= thissize
;
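
/* Worked example (illustrative): a 10-bit field at bitpos 28 of a
   word-aligned MEM (unit == 32) is stored in two passes: the first
   iteration writes thissize == 4 bits at thispos 28 of word 0, the second
   writes the remaining 6 bits at thispos 0 of word 1.  */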

/* A subroutine of extract_bit_field_1 that converts return value X
   to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
   to extract_bit_field.  */

static rtx
convert_extracted_bit_field (rtx x, enum machine_mode mode,
			     enum machine_mode tmode, bool unsignedp)
{
  if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
    return x;

  /* If the x mode is not a scalar integral, first convert to the
     integer mode of that size and then access it as a floating-point
     value via a SUBREG.  */
  if (!SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode smode;

      smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
      x = convert_to_mode (smode, x, unsignedp);
      x = force_reg (smode, x);
      return gen_lowpart (tmode, x);
    }

  return convert_to_mode (tmode, x, unsignedp);
}

/* A subroutine of extract_bit_field, with the same arguments.
   If FALLBACK_P is true, fall back to extract_fixed_bit_field
   if we can find no other means of implementing the operation.
   if FALLBACK_P is false, return NULL instead.  */

static rtx
extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		     unsigned HOST_WIDE_INT bitnum,
		     int unsignedp, bool packedp, rtx target,
		     enum machine_mode mode, enum machine_mode tmode,
		     bool fallback_p)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  enum machine_mode int_mode;
  enum machine_mode ext_mode;
  enum machine_mode mode1;
  enum insn_code icode;
  int byte_offset;

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* See if we can get a better vector mode before extracting.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && GET_MODE_INNER (GET_MODE (op0)) != tmode)
    {
      enum machine_mode new_mode;
      int nunits = GET_MODE_NUNITS (GET_MODE (op0));

      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
	new_mode = MIN_MODE_VECTOR_FLOAT;
      else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
	new_mode = MIN_MODE_VECTOR_FRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
	new_mode = MIN_MODE_VECTOR_UFRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
	new_mode = MIN_MODE_VECTOR_ACCUM;
      else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
	new_mode = MIN_MODE_VECTOR_UACCUM;
      else
	new_mode = MIN_MODE_VECTOR_INT;

      for (; new_mode != VOIDmode; new_mode = GET_MODE_WIDER_MODE (new_mode))
	if (GET_MODE_NUNITS (new_mode) == nunits
	    && GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
	    && targetm.vector_mode_supported_p (new_mode))
	  break;
      if (new_mode != VOIDmode)
	op0 = gen_lowpart (new_mode, op0);
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
	  == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) optab_handler (vec_extract_optab, outermode);
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
	dest = target;

      if (!dest)
	dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
	dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
	src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
	rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
		  && (*insn_data[icode].operand[1].predicate) (src, mode1)
		  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));

      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
	{
	  emit_insn (seq);
	  emit_insn (pat);
	  if (mode0 != mode)
	    return gen_lowpart (tmode, dest);
	  return dest;
	}
    }
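
  /* Worked example (illustrative): asking for bits 64-95 of a V4SImode
     register computes pos == 64 / 32 == 2, so the expansion is a single
     vec_extract of lane 2 instead of shifts and masks.  */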

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (MEM_P (op0))
	  op0 = adjust_address (op0, imode, 0);
	else if (imode != BLKmode)
	  {
	    op0 = gen_lowpart (imode, op0);

	    /* If we got a SUBREG, force it into a register since we
	       aren't going to be able to do another SUBREG on it.  */
	    if (GET_CODE (op0) == SUBREG)
	      op0 = force_reg (imode, op0);
	  }
	else if (REG_P (op0))
	  {
	    rtx reg, subreg;
	    imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
					    MODE_INT);
	    reg = gen_reg_rtx (imode);
	    subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
	    emit_move_insn (subreg, op0);
	    op0 = reg;
	    bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
	  }
	else
	  {
	    rtx mem = assign_stack_temp (GET_MODE (op0),
					 GET_MODE_SIZE (GET_MODE (op0)), 0);
	    emit_move_insn (mem, op0);
	    op0 = adjust_address (mem, BLKmode, 0);
	  }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  bitpos = bitnum % unit;
  offset = bitnum / unit;
  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
	   ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
	   : mode);

  /* If the bitfield is volatile, we need to make sure the access
     remains on a type-aligned boundary.  */
  if (GET_CODE (op0) == MEM
      && MEM_VOLATILE_P (op0)
      && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
      && flag_strict_volatile_bitfields > 0)
    goto no_subreg_mode_swap;

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	&& bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
	   /* ??? The big endian test here is wrong.  This is correct
	      if the value is in a register, and if mode_for_size is not
	      the same mode as op0.  This causes us to get unnecessarily
	      inefficient code from the Thumb port when -mbig-endian.  */
	   && (BYTES_BIG_ENDIAN
	       ? bitpos + bitsize == BITS_PER_WORD
	       : bitpos == 0)))
      && ((!MEM_P (op0)
	   && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode1),
				     GET_MODE_BITSIZE (GET_MODE (op0)))
	   && GET_MODE_SIZE (mode1) != 0
	   && byte_offset % GET_MODE_SIZE (mode1) == 0)
	  || (MEM_P (op0)
	      && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
		  || (offset * BITS_PER_UNIT % bitsize == 0
		      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (MEM_P (op0))
	op0 = adjust_address (op0, mode1, offset);
      else if (mode1 != GET_MODE (op0))
	{
	  rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
					 byte_offset);
	  if (sub == NULL)
	    goto no_subreg_mode_swap;
	  op0 = sub;
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || !REG_P (target))
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_clobber (target);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  unsigned int wordnum
	    = (WORDS_BIG_ENDIAN
	       ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
	       : i);
	  /* Offset from start of field in OP0.  */
	  unsigned int bit_offset = (WORDS_BIG_ENDIAN
				     ? MAX (0, ((int) bitsize - ((int) i + 1)
						* (int) BITS_PER_WORD))
				     : (int) i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset, 1, false, target_part,
				 mode, word_mode);

	  gcc_assert (target_part);

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      unsigned int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		emit_move_insn
		  (operand_subword (target,
				    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
				    1, VOIDmode),
		   const0_rtx);
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_cst (NULL_TREE,
					    GET_MODE_BITSIZE (mode) - bitsize),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_cst (NULL_TREE,
					  GET_MODE_BITSIZE (mode) - bitsize),
			   NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  /* Should probably push op0 out to memory and then do a load.  */
  gcc_assert (int_mode != BLKmode);

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */
  if (!MEM_P (op0))
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (!REG_P (op0))
	    op0 = copy_to_reg (op0);
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }

  /* Now OFFSET is nonzero only for memory operands.  */
  ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
  icode = unsignedp ? CODE_FOR_extzv : CODE_FOR_extv;
  if (ext_mode != MAX_MACHINE_MODE
      && bitsize > 0
      && GET_MODE_BITSIZE (ext_mode) >= bitsize
      /* If op0 is a register, we need it in EXT_MODE to make it
	 acceptable to the format of ext(z)v.  */
      && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
      && !((REG_P (op0) || GET_CODE (op0) == SUBREG)
	   && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode)))
      && check_predicate_volatile_ok (icode, 1, op0, GET_MODE (op0)))
    {
      unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
      rtx bitsize_rtx, bitpos_rtx;
      rtx last = get_last_insn ();
      rtx xop0 = op0;
      rtx xtarget = target;
      rtx xspec_target = target;
      rtx xspec_target_subreg = 0;
      rtx pat;

      /* If op0 is a register, we need it in EXT_MODE to make it
	 acceptable to the format of ext(z)v.  */
      if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
	xop0 = gen_lowpart_SUBREG (ext_mode, xop0);
      if (MEM_P (xop0))
	/* Get ref to first byte containing part of the field.  */
	xop0 = adjust_address (xop0, byte_mode, xoffset);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */
      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* Now convert from counting within UNIT to counting in EXT_MODE.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
	xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;

      unit = GET_MODE_BITSIZE (ext_mode);

      if (xtarget == 0)
	xtarget = xspec_target = gen_reg_rtx (tmode);

      if (GET_MODE (xtarget) != ext_mode)
	{
	  /* Don't use LHS paradoxical subreg if explicit truncation is needed
	     between the mode of the extraction (word_mode) and the target
	     mode.  Instead, create a temporary and use convert_move to set
	     the target.  */
	  if (REG_P (xtarget)
	      && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (xtarget)),
					GET_MODE_BITSIZE (ext_mode)))
	    {
	      xtarget = gen_lowpart (ext_mode, xtarget);
	      if (GET_MODE_SIZE (ext_mode)
		  > GET_MODE_SIZE (GET_MODE (xspec_target)))
		xspec_target_subreg = xtarget;
	    }
	  else
	    xtarget = gen_reg_rtx (ext_mode);
	}

      /* If this machine's ext(z)v insists on a register target,
	 make sure we have one.  */
      if (!insn_data[(int) icode].operand[0].predicate (xtarget, ext_mode))
	xtarget = gen_reg_rtx (ext_mode);

      bitsize_rtx = GEN_INT (bitsize);
      bitpos_rtx = GEN_INT (xbitpos);

      pat = (unsignedp
	     ? gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx)
	     : gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx));
      if (pat)
	{
	  emit_insn (pat);
	  if (xtarget == xspec_target)
	    return xtarget;
	  if (xtarget == xspec_target_subreg)
	    return xspec_target;
	  return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
	}
      delete_insns_since (last);
    }

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
    {
      enum machine_mode bestmode;

      /* Get the mode to use for inserting into this field.  If
	 OP0 is BLKmode, get the smallest mode consistent with the
	 alignment.  If OP0 is a non-BLKmode object that is no
	 wider than EXT_MODE, use its mode.  Otherwise, use the
	 smallest mode containing the field.  */

      if (GET_MODE (op0) == BLKmode
	  || (ext_mode != MAX_MACHINE_MODE
	      && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
	bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
				  (ext_mode == MAX_MACHINE_MODE
				   ? VOIDmode : ext_mode),
				  MEM_VOLATILE_P (op0));
      else
	bestmode = GET_MODE (op0);

      if (bestmode != VOIDmode
	  && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
	       && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
	{
	  unsigned HOST_WIDE_INT xoffset, xbitpos;

	  /* Compute the offset as a multiple of this unit,
	     counting in bytes.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  xbitpos = bitnum % unit;

	  /* Make sure the register is big enough for the whole field.  */
	  if (xoffset * BITS_PER_UNIT + unit
	      >= offset * BITS_PER_UNIT + bitsize)
	    {
	      rtx last, result, xop0;

	      last = get_last_insn ();

	      /* Fetch it to a register in that size.  */
	      xop0 = adjust_address (op0, bestmode, xoffset);
	      xop0 = force_reg (bestmode, xop0);
	      result = extract_bit_field_1 (xop0, bitsize, xbitpos,
					    unsignedp, packedp, target,
					    mode, tmode, false);
	      if (result)
		return result;

	      delete_insns_since (last);
	    }
	}
    }

  if (!fallback_p)
    return NULL;

  target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
				    bitpos, target, unsignedp, packedp);
  return convert_extracted_bit_field (target, mode, tmode, unsignedp);
}

/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   PACKEDP is nonzero if the field has the packed attribute.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		   unsigned HOST_WIDE_INT bitnum, int unsignedp, bool packedp,
		   rtx target, enum machine_mode mode, enum machine_mode tmode)
{
  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp, packedp,
			      target, mode, tmode, true);
}

/* Extract a bit field using shifts and boolean operations
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   PACKEDP is true if the field has the packed attribute.

   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
			 unsigned HOST_WIDE_INT offset,
			 unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitpos, rtx target,
			 int unsignedp, bool packedp)
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || REG_P (op0))
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      if (MEM_VOLATILE_P (op0)
	  && flag_strict_volatile_bitfields > 0)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
	    mode = GET_MODE (op0);
	  else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
	    mode = GET_MODE (target);
	  else
	    mode = tmode;
	}
      else
	mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			      MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* If we're accessing a volatile MEM, we can't do the next
	 alignment step if it results in a multi-word access where we
	 otherwise wouldn't have one.  So, check for that case
	 here.  */
      if (MEM_P (op0)
	  && MEM_VOLATILE_P (op0)
	  && flag_strict_volatile_bitfields > 0
	  && bitpos + bitsize <= total_bits
	  && bitpos + bitsize + (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT > total_bits)
	{
	  if (STRICT_ALIGNMENT)
	    {
	      static bool informed_about_misalignment = false;
	      bool warned;

	      if (packedp)
		{
		  if (bitsize == total_bits)
		    warned = warning_at (input_location,
					 OPT_fstrict_volatile_bitfields,
					 "multiple accesses to volatile structure member"
					 " because of packed attribute");
		  else
		    warned = warning_at (input_location,
					 OPT_fstrict_volatile_bitfields,
					 "multiple accesses to volatile structure bitfield"
					 " because of packed attribute");

		  return extract_split_bit_field (op0, bitsize,
						  bitpos + offset * BITS_PER_UNIT,
						  unsignedp);
		}

	      if (bitsize == total_bits)
		warned = warning_at (input_location,
				     OPT_fstrict_volatile_bitfields,
				     "mis-aligned access used for structure member");
	      else
		warned = warning_at (input_location,
				     OPT_fstrict_volatile_bitfields,
				     "mis-aligned access used for structure bitfield");

	      if (! informed_about_misalignment && warned)
		{
		  informed_about_misalignment = true;
		  inform (input_location,
			  "when a volatile object spans multiple type-sized locations,"
			  " the compiler must choose between using a single mis-aligned access to"
			  " preserve the volatility, or using multiple aligned accesses to avoid"
			  " runtime faults; this code may fail at runtime if the hardware does"
			  " not allow this access");
		}
	    }
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_cst (NULL_TREE, bitpos);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }
1861 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1862 then arithmetic-shift its lsb to the lsb of the word. */
1863 op0
= force_reg (mode
, op0
);
1867 /* Find the narrowest integer mode that contains the field. */
1869 for (mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
); mode
!= VOIDmode
;
1870 mode
= GET_MODE_WIDER_MODE (mode
))
1871 if (GET_MODE_BITSIZE (mode
) >= bitsize
+ bitpos
)
1873 op0
= convert_to_mode (mode
, op0
, 0);
1877 if (GET_MODE_BITSIZE (mode
) != (bitsize
+ bitpos
))
1880 = build_int_cst (NULL_TREE
,
1881 GET_MODE_BITSIZE (mode
) - (bitsize
+ bitpos
));
1882 /* Maybe propagate the target for the shift. */
1883 rtx subtarget
= (target
!= 0 && REG_P (target
) ? target
: 0);
1884 op0
= expand_shift (LSHIFT_EXPR
, mode
, op0
, amount
, subtarget
, 1);
1887 return expand_shift (RSHIFT_EXPR
, mode
, op0
,
1888 build_int_cst (NULL_TREE
,
1889 GET_MODE_BITSIZE (mode
) - bitsize
),
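/* A worked instance of the signed path above: for an 8-bit field at
   BITPOS 4, the narrowest containing integer mode is 16 bits wide, so
   the value is shifted left by 16 - (8 + 4) = 4 to bring the field's
   msb to the word's msb, then arithmetic-shifted right by 16 - 8 = 8,
   which sign-extends the field into all the upper bits.  */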
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
{
  double_int mask;

  mask = double_int_mask (bitsize);
  mask = double_int_lshift (mask, bitpos, HOST_BITS_PER_DOUBLE_INT, false);

  if (complement)
    mask = double_int_not (mask);

  return immed_double_int_const (mask, mode);
}
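/* For example, mask_rtx (SImode, 8, 16, 0) yields the SImode constant
   0x00ffff00 (sixteen ones shifted left by eight), and with COMPLEMENT
   nonzero it yields 0xff0000ff instead.  */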
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
{
  double_int val;

  val = double_int_zext (uhwi_to_double_int (INTVAL (value)), bitsize);
  val = double_int_lshift (val, bitpos, HOST_BITS_PER_DOUBLE_INT, false);

  return immed_double_int_const (val, mode);
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.  */

static rtx
extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitpos, int unsignedp)
{
  unsigned int unit;
  unsigned int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         extract_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (REG_P (op0))
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* Extract the parts in bit-counting order,
         whose meaning is determined by BYTES_PER_UNIT.
         OFFSET is in UNITs, and UNIT is in bits.
         extract_fixed_bit_field wants offset in bytes.  */
      part = extract_fixed_bit_field (word_mode, word,
                                      offset * unit / BITS_PER_UNIT,
                                      thissize, thispos, 0, 1, false);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
        {
          if (bitsize != bitsdone)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 build_int_cst (NULL_TREE, bitsize - bitsdone),
                                 0, 1);
        }
      else
        {
          if (bitsdone != thissize)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 build_int_cst (NULL_TREE,
                                                bitsdone - thissize), 0, 1);
        }

      if (first)
        result = part;
      else
        /* Combine the parts with bitwise or.  This works
           because we extracted each part as an unsigned bit field.  */
        result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
                               OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
                         build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
                         NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
                       build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
                       NULL_RTX, 0);
}
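/* For example, with 32-bit units a 16-bit field starting at bit 24 is
   handled in two iterations of the loop above: the first extracts
   MIN (16, 32 - 24) = 8 bits from the first word, the second extracts
   the remaining 8 bits from the next word, and the two parts are
   combined with IOR before the final sign- or zero-extension.  */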
/* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
   the bit pattern.  SRC_MODE is the mode of SRC; if this is smaller than
   MODE, fill the upper bits with zeros.  Fail if the layout of either
   mode is unknown (as for CC modes) or if the extraction would involve
   unprofitable mode punning.  Return the value on success, otherwise
   return null.

   This is different from gen_lowpart* in these respects:

     - the returned value must always be considered an rvalue

     - when MODE is wider than SRC_MODE, the extraction involves
       a zero extension

     - when MODE is smaller than SRC_MODE, the extraction involves
       a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).

   In other words, this routine performs a computation, whereas the
   gen_lowpart* routines are conceptually lvalue or rvalue subreg
   operations.  */

rtx
extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
{
  enum machine_mode int_mode, src_int_mode;

  if (mode == src_mode)
    return src;

  if (CONSTANT_P (src))
    {
      /* simplify_gen_subreg can't be used here, as if simplify_subreg
         fails, it will happily create (subreg (symbol_ref)) or similar
         invalid SUBREGs.  */
      unsigned int byte = subreg_lowpart_offset (mode, src_mode);
      rtx ret = simplify_subreg (mode, src, src_mode, byte);
      if (ret)
        return ret;

      if (GET_MODE (src) == VOIDmode
          || !validate_subreg (mode, src_mode, src, byte))
        return NULL_RTX;

      src = force_reg (GET_MODE (src), src);
      return gen_rtx_SUBREG (mode, src, byte);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
    return NULL_RTX;

  if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
      && MODES_TIEABLE_P (mode, src_mode))
    {
      rtx x = gen_lowpart_common (mode, src);
      if (x)
        return x;
    }

  src_int_mode = int_mode_for_mode (src_mode);
  int_mode = int_mode_for_mode (mode);
  if (src_int_mode == BLKmode || int_mode == BLKmode)
    return NULL_RTX;

  if (!MODES_TIEABLE_P (src_int_mode, src_mode))
    return NULL_RTX;
  if (!MODES_TIEABLE_P (int_mode, mode))
    return NULL_RTX;

  src = gen_lowpart (src_int_mode, src);
  src = convert_modes (int_mode, src_int_mode, src, true);
  src = gen_lowpart (mode, src);
  return src;
}
/* Add INC into TARGET.  */

void
expand_inc (rtx target, rtx inc)
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
                            target, inc,
                            target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Subtract DEC from TARGET.  */

void
expand_dec (rtx target, rtx dec)
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
                            target, dec,
                            target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
              tree amount, rtx target, int unsignedp)
{
  rtx op1, temp = 0;
  int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  optab lshift_optab = ashl_optab;
  optab rshift_arith_optab = ashr_optab;
  optab rshift_uns_optab = lshr_optab;
  optab lrotate_optab = rotl_optab;
  optab rrotate_optab = rotr_optab;
  enum machine_mode op1_mode;
  int attempt;
  bool speed = optimize_insn_for_speed_p ();

  op1 = expand_normal (amount);
  op1_mode = GET_MODE (op1);

  /* Determine whether the shift/rotate amount is a vector, or scalar.  If the
     shift amount is a vector, use the vector/vector shift patterns.  */
  if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
    {
      lshift_optab = vashl_optab;
      rshift_arith_optab = vashr_optab;
      rshift_uns_optab = vlshr_optab;
      lrotate_optab = vrotl_optab;
      rrotate_optab = vrotr_optab;
    }

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  if (SHIFT_COUNT_TRUNCATED)
    {
      if (CONST_INT_P (op1)
          && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
              (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
        op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
                       % GET_MODE_BITSIZE (mode));
      else if (GET_CODE (op1) == SUBREG
               && subreg_lowpart_p (op1)
               && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (op1))))
        op1 = SUBREG_REG (op1);
    }

  if (op1 == const0_rtx)
    return shifted;

  /* Check whether it's cheaper to implement a left shift by a constant
     bit count by a sequence of additions.  */
  if (code == LSHIFT_EXPR
      && CONST_INT_P (op1)
      && INTVAL (op1) > 0
      && INTVAL (op1) < GET_MODE_BITSIZE (mode)
      && INTVAL (op1) < MAX_BITS_PER_WORD
      && shift_cost[speed][mode][INTVAL (op1)] > INTVAL (op1) * add_cost[speed][mode]
      && shift_cost[speed][mode][INTVAL (op1)] != MAX_COST)
    {
      int i;
      for (i = 0; i < INTVAL (op1); i++)
        {
          temp = force_reg (mode, shifted);
          shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
                                  unsignedp, OPTAB_LIB_WIDEN);
        }
      return shifted;
    }

  for (attempt = 0; temp == 0 && attempt < 3; attempt++)
    {
      enum optab_methods methods;

      if (attempt == 0)
        methods = OPTAB_DIRECT;
      else if (attempt == 1)
        methods = OPTAB_WIDEN;
      else
        methods = OPTAB_LIB_WIDEN;

      if (rotate)
        {
          /* Widening does not work for rotation.  */
          if (methods == OPTAB_WIDEN)
            continue;
          else if (methods == OPTAB_LIB_WIDEN)
            {
              /* If we have been unable to open-code this by a rotation,
                 do it as the IOR of two shifts.  I.e., to rotate A
                 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
                 where C is the bitsize of A.

                 It is theoretically possible that the target machine might
                 not be able to perform either shift and hence we would
                 be making two libcalls rather than just the one for the
                 shift (similarly if IOR could not be done).  We will allow
                 this extremely unlikely lossage to avoid complicating the
                 code.  */

              rtx subtarget = target == shifted ? 0 : target;
              tree new_amount, other_amount;
              rtx temp1;
              tree type = TREE_TYPE (amount);
              if (GET_MODE (op1) != TYPE_MODE (type)
                  && GET_MODE (op1) != VOIDmode)
                op1 = convert_to_mode (TYPE_MODE (type), op1, 1);
              new_amount = make_tree (type, op1);
              other_amount
                = fold_build2 (MINUS_EXPR, type,
                               build_int_cst (type, GET_MODE_BITSIZE (mode)),
                               new_amount);

              shifted = force_reg (mode, shifted);

              temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
                                   mode, shifted, new_amount, 0, 1);
              temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
                                    mode, shifted, other_amount, subtarget, 1);
              return expand_binop (mode, ior_optab, temp, temp1, target,
                                   unsignedp, methods);
            }

          temp = expand_binop (mode,
                               left ? lrotate_optab : rrotate_optab,
                               shifted, op1, target, unsignedp, methods);
        }
      else if (unsignedp)
        temp = expand_binop (mode,
                             left ? lshift_optab : rshift_uns_optab,
                             shifted, op1, target, unsignedp, methods);

      /* Do arithmetic shifts.
         Also, if we are going to widen the operand, we can just as well
         use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
          && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
        {
          enum optab_methods methods1 = methods;

          /* If trying to widen a log shift to an arithmetic shift,
             don't accept an arithmetic shift of the same size.  */
          if (unsignedp)
            methods1 = OPTAB_MUST_WIDEN;

          /* Arithmetic shift */

          temp = expand_binop (mode,
                               left ? lshift_optab : rshift_arith_optab,
                               shifted, op1, target, unsignedp, methods1);
        }

      /* We used to try extzv here for logical right shifts, but that was
         only useful for one machine, the VAX, and caused poor code
         generation there for lshrdi3, so the code was deleted and a
         define_expand for lshrsi3 was added to vax.md.  */
    }

  gcc_assert (temp);
  return temp;
}
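/* The rotate-as-IOR fallback above relies on the identity
   (A rotl N) == (A << N) | ((unsigned) A >> (C - N)) for 0 < N < C;
   e.g. for C == 32 and N == 8, rotating 0x12345678 left gives
   0x34567812 == (0x12345678 << 8) | (0x12345678 >> 24).  */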
/* Indicates the type of fixup needed after a constant multiplication.
   BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
   the result should be negated, and ADD_VARIANT means that the
   multiplicand should be added to the result.  */
enum mult_variant {basic_variant, negate_variant, add_variant};

static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
                        const struct mult_cost *, enum machine_mode mode);
static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
                                 struct algorithm *, enum mult_variant *, int);
static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
                              const struct algorithm *, enum mult_variant);
static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
                                                 int, rtx *, int *, int *);
static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
static rtx extract_high_half (enum machine_mode, rtx);
static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
                                       int, int);
/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.
   MODE is the machine mode of the multiplication.  */

static void
synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
            const struct mult_cost *cost_limit, enum machine_mode mode)
{
  int m;
  struct algorithm *alg_in, *best_alg;
  struct mult_cost best_cost;
  struct mult_cost new_limit;
  int op_cost, op_latency;
  unsigned HOST_WIDE_INT orig_t = t;
  unsigned HOST_WIDE_INT q;
  int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
  int hash_index;
  bool cache_hit = false;
  enum alg_code cache_alg = alg_zero;
  bool speed = optimize_insn_for_speed_p ();

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost.cost = cost_limit->cost + 1;
  alg_out->cost.latency = cost_limit->latency + 1;

  if (cost_limit->cost < 0
      || (cost_limit->cost == 0 && cost_limit->latency <= 0))
    return;

  /* Restrict the bits of "t" to the multiplication's mode.  */
  t &= GET_MODE_MASK (mode);

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost.cost = 0;
      alg_out->cost.latency = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (MULT_COST_LESS (cost_limit, zero_cost[speed]))
        return;
      else
        {
          alg_out->ops = 1;
          alg_out->cost.cost = zero_cost[speed];
          alg_out->cost.latency = zero_cost[speed];
          alg_out->op[0] = alg_zero;
          return;
        }
    }

  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = XALLOCA (struct algorithm);
  best_alg = XALLOCA (struct algorithm);
  best_cost = *cost_limit;

  /* Compute the hash index.  */
  hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;

  /* See if we already know what to do for T.  */
  if (alg_hash[hash_index].t == t
      && alg_hash[hash_index].mode == mode
      && alg_hash[hash_index].speed == speed
      && alg_hash[hash_index].alg != alg_unknown)
    {
      cache_alg = alg_hash[hash_index].alg;

      if (cache_alg == alg_impossible)
        {
          /* The cache tells us that it's impossible to synthesize
             multiplication by T within alg_hash[hash_index].cost.  */
          if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
            /* COST_LIMIT is at least as restrictive as the one
               recorded in the hash table, in which case we have no
               hope of synthesizing a multiplication.  Just
               return.  */
            return;

          /* If we get here, COST_LIMIT is less restrictive than the
             one recorded in the hash table, so we may be able to
             synthesize a multiplication.  Proceed as if we didn't
             have the cache entry.  */
        }
      else
        {
          if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
            /* The cached algorithm shows that this multiplication
               requires more cost than COST_LIMIT.  Just return.  This
               way, we don't clobber this cache entry with
               alg_impossible but retain useful information.  */
            return;

          cache_hit = true;

          switch (cache_alg)
            {
            case alg_shift:
              goto do_alg_shift;

            case alg_add_t_m2:
            case alg_sub_t_m2:
              goto do_alg_addsub_t_m2;

            case alg_add_factor:
            case alg_sub_factor:
              goto do_alg_addsub_factor;

            case alg_add_t2_m:
              goto do_alg_add_t2_m;

            case alg_sub_t2_m:
              goto do_alg_sub_t2_m;

            default:
              gcc_unreachable ();
            }
        }
    }

  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */

  if ((t & 1) == 0)
    {
    do_alg_shift:
      m = floor_log2 (t & -t);	/* m = number of low zero bits */
      if (m < maxm)
        {
          q = t >> m;
          /* The function expand_shift will choose between a shift and
             a sequence of additions, so the observed cost is given as
             MIN (m * add_cost[speed][mode], shift_cost[speed][mode][m]).  */
          op_cost = m * add_cost[speed][mode];
          if (shift_cost[speed][mode][m] < op_cost)
            op_cost = shift_cost[speed][mode][m];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, q, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_shift;
            }

          /* See if treating ORIG_T as a signed number yields a better
             sequence.  Try this sequence only for a negative ORIG_T
             as it would be useless for a non-negative ORIG_T.  */
          if ((HOST_WIDE_INT) orig_t < 0)
            {
              /* Shift ORIG_T as follows because a right shift of a
                 negative-valued signed type is implementation
                 defined.  */
              q = ~(~orig_t >> m);
              /* The function expand_shift will choose between a shift
                 and a sequence of additions, so the observed cost is
                 given as MIN (m * add_cost[speed][mode],
                 shift_cost[speed][mode][m]).  */
              op_cost = m * add_cost[speed][mode];
              if (shift_cost[speed][mode][m] < op_cost)
                op_cost = shift_cost[speed][mode][m];
              new_limit.cost = best_cost.cost - op_cost;
              new_limit.latency = best_cost.latency - op_cost;
              synth_mult (alg_in, q, &new_limit, mode);

              alg_in->cost.cost += op_cost;
              alg_in->cost.latency += op_cost;
              if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
                {
                  struct algorithm *x;
                  best_cost = alg_in->cost;
                  x = alg_in, alg_in = best_alg, best_alg = x;
                  best_alg->log[best_alg->ops] = m;
                  best_alg->op[best_alg->ops] = alg_shift;
                }
            }
        }
      if (cache_hit)
        goto done;
    }

  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

    do_alg_addsub_t_m2:
      for (w = 1; (w & t) != 0; w <<= 1)
        ;
      /* If T was -1, then W will be zero after the loop.  This is another
         case where T ends with ...111.  Handling this with (T + 1) and
         subtract 1 produces slightly better code and results in algorithm
         selection much faster than treating it like the ...0111 case
         below.  */
      if (w == 0
          || (w > 2
              /* Reject the case where t is 3.
                 Thus we prefer addition in that case.  */
              && t != 3))
        {
          /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */

          op_cost = add_cost[speed][mode];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, t + 1, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = 0;
              best_alg->op[best_alg->ops] = alg_sub_t_m2;
            }
        }
      else
        {
          /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */

          op_cost = add_cost[speed][mode];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, t - 1, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = 0;
              best_alg->op[best_alg->ops] = alg_add_t_m2;
            }
        }

      /* We may be able to calculate a * -7, a * -15, a * -31, etc
         quickly with a - a * n for some appropriate constant n.  */
      m = exact_log2 (-orig_t + 1);
      if (m >= 0 && m < maxm)
        {
          op_cost = shiftsub1_cost[speed][mode][m];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
                      &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_sub_t_m2;
            }
        }

      if (cache_hit)
        goto done;
    }

  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shift the result by m and add/subtract it to itself.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */

 do_alg_addsub_factor:
  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
      if (t % d == 0 && t > d && m < maxm
          && (!cache_hit || cache_alg == alg_add_factor))
        {
          /* If the target has a cheap shift-and-add instruction use
             that in preference to a shift insn followed by an add insn.
             Assume that the shift-and-add is "atomic" with a latency
             equal to its cost, otherwise assume that on superscalar
             hardware the shift may be executed concurrently with the
             earlier steps in the algorithm.  */
          op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
          if (shiftadd_cost[speed][mode][m] < op_cost)
            {
              op_cost = shiftadd_cost[speed][mode][m];
              op_latency = op_cost;
            }
          else
            op_latency = add_cost[speed][mode];

          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_latency;
          synth_mult (alg_in, t / d, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_latency;
          if (alg_in->cost.latency < op_cost)
            alg_in->cost.latency = op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_add_factor;
            }
          /* Other factors will have been taken care of in the recursion.  */
          break;
        }

      d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
      if (t % d == 0 && t > d && m < maxm
          && (!cache_hit || cache_alg == alg_sub_factor))
        {
          /* If the target has a cheap shift-and-subtract insn use
             that in preference to a shift insn followed by a sub insn.
             Assume that the shift-and-sub is "atomic" with a latency
             equal to its cost, otherwise assume that on superscalar
             hardware the shift may be executed concurrently with the
             earlier steps in the algorithm.  */
          op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
          if (shiftsub0_cost[speed][mode][m] < op_cost)
            {
              op_cost = shiftsub0_cost[speed][mode][m];
              op_latency = op_cost;
            }
          else
            op_latency = add_cost[speed][mode];

          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_latency;
          synth_mult (alg_in, t / d, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_latency;
          if (alg_in->cost.latency < op_cost)
            alg_in->cost.latency = op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_sub_factor;
            }
          break;
        }
    }
  if (cache_hit)
    goto done;

  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
    do_alg_add_t2_m:
      q = t - 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
        {
          op_cost = shiftadd_cost[speed][mode][m];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_add_t2_m;
            }
        }
      if (cache_hit)
        goto done;

    do_alg_sub_t2_m:
      q = t + 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
        {
          op_cost = shiftsub0_cost[speed][mode][m];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_sub_t2_m;
            }
        }
      if (cache_hit)
        goto done;
    }

 done:
  /* If best_cost has not decreased, we have not found any algorithm.  */
  if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
    {
      /* We failed to find an algorithm.  Record alg_impossible for
         this case (that is, <T, MODE, COST_LIMIT>) so that next time
         we are asked to find an algorithm for T within the same or
         lower COST_LIMIT, we can immediately return to the
         caller.  */
      alg_hash[hash_index].t = t;
      alg_hash[hash_index].mode = mode;
      alg_hash[hash_index].speed = speed;
      alg_hash[hash_index].alg = alg_impossible;
      alg_hash[hash_index].cost = *cost_limit;
      return;
    }

  /* Cache the result.  */
  if (!cache_hit)
    {
      alg_hash[hash_index].t = t;
      alg_hash[hash_index].mode = mode;
      alg_hash[hash_index].speed = speed;
      alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
      alg_hash[hash_index].cost.cost = best_cost.cost;
      alg_hash[hash_index].cost.latency = best_cost.latency;
    }

  /* If we are getting a too long sequence for `struct algorithm'
     to record, make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = best_cost;
  memcpy (alg_out->op, best_alg->op,
          alg_out->ops * sizeof *alg_out->op);
  memcpy (alg_out->log, best_alg->log,
          alg_out->ops * sizeof *alg_out->log);
}
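/* As an illustration of the search above, t = 45 factors as 9 * 5, so
   one sequence synth_mult can return is the two-operation chain
       a1 = (a0 << 3) + a0	(alg_add_factor, log 3: a0 * 9)
       a2 = (a1 << 2) + a1	(alg_add_factor, log 2: a1 * 5)
   which costs two shift-and-add instructions instead of a multiply.  */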
/* Find the cheapest way of multiplying a value of mode MODE by VAL.
   Try three variations:

       - a shift/add sequence based on VAL itself
       - a shift/add sequence based on -VAL, followed by a negation
       - a shift/add sequence based on VAL - 1, followed by an addition.

   Return true if the cheapest of these costs less than MULT_COST,
   describing the algorithm in *ALG and final fixup in *VARIANT.  */

static bool
choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
                     struct algorithm *alg, enum mult_variant *variant,
                     int mult_cost)
{
  struct algorithm alg2;
  struct mult_cost limit;
  int op_cost;
  bool speed = optimize_insn_for_speed_p ();

  /* Fail quickly for impossible bounds.  */
  if (mult_cost < 0)
    return false;

  /* Ensure that mult_cost provides a reasonable upper bound.
     Any constant multiplication can be performed with less
     than 2 * bits additions.  */
  op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[speed][mode];
  if (mult_cost > op_cost)
    mult_cost = op_cost;

  *variant = basic_variant;
  limit.cost = mult_cost;
  limit.latency = mult_cost;
  synth_mult (alg, val, &limit, mode);

  /* This works only if the inverted value actually fits in an
     `unsigned int' */
  if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
    {
      op_cost = neg_cost[speed][mode];
      if (MULT_COST_LESS (&alg->cost, mult_cost))
        {
          limit.cost = alg->cost.cost - op_cost;
          limit.latency = alg->cost.latency - op_cost;
        }
      else
        {
          limit.cost = mult_cost - op_cost;
          limit.latency = mult_cost - op_cost;
        }

      synth_mult (&alg2, -val, &limit, mode);
      alg2.cost.cost += op_cost;
      alg2.cost.latency += op_cost;
      if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
        *alg = alg2, *variant = negate_variant;
    }

  /* This proves very useful for division-by-constant.  */
  op_cost = add_cost[speed][mode];
  if (MULT_COST_LESS (&alg->cost, mult_cost))
    {
      limit.cost = alg->cost.cost - op_cost;
      limit.latency = alg->cost.latency - op_cost;
    }
  else
    {
      limit.cost = mult_cost - op_cost;
      limit.latency = mult_cost - op_cost;
    }

  synth_mult (&alg2, val - 1, &limit, mode);
  alg2.cost.cost += op_cost;
  alg2.cost.latency += op_cost;
  if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
    *alg = alg2, *variant = add_variant;

  return MULT_COST_LESS (&alg->cost, mult_cost);
}
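/* For example, val = 7 is typically cheapest as (x << 3) - x
   (basic_variant), while for val = -7 synthesizing 7 and then negating
   (negate_variant) is usually cheaper than working with the bit
   pattern of -7 directly.  */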
/* A subroutine of expand_mult, used for constant multiplications.
   Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
   convenient.  Use the shift/add sequence described by ALG and apply
   the final fixup specified by VARIANT.  */

static rtx
expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
                   rtx target, const struct algorithm *alg,
                   enum mult_variant variant)
{
  HOST_WIDE_INT val_so_far;
  rtx insn, accum, tem;
  int opno;
  enum machine_mode nmode;

  /* Avoid referencing memory over and over and invalid sharing
     on SUBREGs.  */
  op0 = force_reg (mode, op0);

  /* ACCUM starts out either as OP0 or as a zero, depending on
     the first operation.  */

  if (alg->op[0] == alg_zero)
    {
      accum = copy_to_mode_reg (mode, const0_rtx);
      val_so_far = 0;
    }
  else if (alg->op[0] == alg_m)
    {
      accum = copy_to_mode_reg (mode, op0);
      val_so_far = 1;
    }
  else
    gcc_unreachable ();

  for (opno = 1; opno < alg->ops; opno++)
    {
      int log = alg->log[opno];
      rtx shift_subtarget = optimize ? 0 : accum;
      rtx add_target
        = (opno == alg->ops - 1 && target != 0 && variant != add_variant
           && !optimize)
          ? target : 0;
      rtx accum_target = optimize ? 0 : accum;

      switch (alg->op[opno])
        {
        case alg_shift:
          tem = expand_shift (LSHIFT_EXPR, mode, accum,
                              build_int_cst (NULL_TREE, log),
                              NULL_RTX, 0);
          /* REG_EQUAL note will be attached to the following insn.  */
          emit_move_insn (accum, tem);
          val_so_far <<= log;
          break;

        case alg_add_t_m2:
          tem = expand_shift (LSHIFT_EXPR, mode, op0,
                              build_int_cst (NULL_TREE, log),
                              NULL_RTX, 0);
          accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
                                 add_target ? add_target : accum_target);
          val_so_far += (HOST_WIDE_INT) 1 << log;
          break;

        case alg_sub_t_m2:
          tem = expand_shift (LSHIFT_EXPR, mode, op0,
                              build_int_cst (NULL_TREE, log),
                              NULL_RTX, 0);
          accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
                                 add_target ? add_target : accum_target);
          val_so_far -= (HOST_WIDE_INT) 1 << log;
          break;

        case alg_add_t2_m:
          accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                build_int_cst (NULL_TREE, log),
                                shift_subtarget, 0);
          accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
                                 add_target ? add_target : accum_target);
          val_so_far = (val_so_far << log) + 1;
          break;

        case alg_sub_t2_m:
          accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                build_int_cst (NULL_TREE, log),
                                shift_subtarget, 0);
          accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
                                 add_target ? add_target : accum_target);
          val_so_far = (val_so_far << log) - 1;
          break;

        case alg_add_factor:
          tem = expand_shift (LSHIFT_EXPR, mode, accum,
                              build_int_cst (NULL_TREE, log),
                              NULL_RTX, 0);
          accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
                                 add_target ? add_target : accum_target);
          val_so_far += val_so_far << log;
          break;

        case alg_sub_factor:
          tem = expand_shift (LSHIFT_EXPR, mode, accum,
                              build_int_cst (NULL_TREE, log),
                              NULL_RTX, 0);
          accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
                                 (add_target
                                  ? add_target : (optimize ? 0 : tem)));
          val_so_far = (val_so_far << log) - val_so_far;
          break;

        default:
          gcc_unreachable ();
        }

      /* Write a REG_EQUAL note on the last insn so that we can cse
         multiplication sequences.  Note that if ACCUM is a SUBREG,
         we've set the inner register and must properly indicate
         that.  */

      tem = op0, nmode = mode;
      if (GET_CODE (accum) == SUBREG)
        {
          nmode = GET_MODE (SUBREG_REG (accum));
          tem = gen_lowpart (nmode, op0);
        }

      insn = get_last_insn ();
      set_unique_reg_note (insn, REG_EQUAL,
                           gen_rtx_MULT (nmode, tem,
                                         GEN_INT (val_so_far)));
    }

  if (variant == negate_variant)
    {
      val_so_far = -val_so_far;
      accum = expand_unop (mode, neg_optab, accum, target, 0);
    }
  else if (variant == add_variant)
    {
      val_so_far = val_so_far + 1;
      accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
    }

  /* Compare only the bits of val and val_so_far that are significant
     in the result mode, to avoid sign-/zero-extension confusion.  */
  val &= GET_MODE_MASK (mode);
  val_so_far &= GET_MODE_MASK (mode);
  gcc_assert (val == val_so_far);

  return accum;
}
/* Perform a multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).

   We check specially for a constant integer as OP1.
   If you want this check for OP0 as well, then before calling
   you should swap the two operands if OP0 would be constant.  */

rtx
expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
             int unsignedp)
{
  enum mult_variant variant;
  struct algorithm algorithm;
  int max_cost;
  bool speed = optimize_insn_for_speed_p ();

  /* Handling const0_rtx here allows us to use zero as a rogue value for
     coeff below.  */
  if (op1 == const0_rtx)
    return const0_rtx;
  if (op1 == const1_rtx)
    return op0;
  if (op1 == constm1_rtx)
    return expand_unop (mode,
                        GET_MODE_CLASS (mode) == MODE_INT
                        && !unsignedp && flag_trapv
                        ? negv_optab : neg_optab,
                        op0, target, 0);

  /* These are the operations that are potentially turned into a sequence
     of shifts and additions.  */
  if (SCALAR_INT_MODE_P (mode)
      && (unsignedp || !flag_trapv))
    {
      HOST_WIDE_INT coeff = 0;
      rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);

      /* synth_mult does an `unsigned int' multiply.  As long as the mode is
         less than or equal in size to `unsigned int' this doesn't matter.
         If the mode is larger than `unsigned int', then synth_mult works
         only if the constant value exactly fits in an `unsigned int' without
         any truncation.  This means that multiplying by negative values does
         not work; results are off by 2^32 on a 32 bit machine.  */

      if (CONST_INT_P (op1))
        {
          /* Attempt to handle multiplication of DImode values by negative
             coefficients, by performing the multiplication by a positive
             multiplier and then inverting the result.  */
          if (INTVAL (op1) < 0
              && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
            {
              /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
                 result is interpreted as an unsigned coefficient.
                 Exclude cost of op0 from max_cost to match the cost
                 calculation of the synth_mult.  */
              max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed)
                         - neg_cost[speed][mode];
              if (max_cost > 0
                  && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
                                          &variant, max_cost))
                {
                  rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
                                                NULL_RTX, &algorithm,
                                                variant);
                  return expand_unop (mode, neg_optab, temp, target, 0);
                }
            }
          else coeff = INTVAL (op1);
        }
      else if (GET_CODE (op1) == CONST_DOUBLE)
        {
          /* If we are multiplying in DImode, it may still be a win
             to try to work with shifts and adds.  */
          if (CONST_DOUBLE_HIGH (op1) == 0
              && CONST_DOUBLE_LOW (op1) > 0)
            coeff = CONST_DOUBLE_LOW (op1);
          else if (CONST_DOUBLE_LOW (op1) == 0
                   && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
            {
              int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
                          + HOST_BITS_PER_WIDE_INT;
              return expand_shift (LSHIFT_EXPR, mode, op0,
                                   build_int_cst (NULL_TREE, shift),
                                   target, unsignedp);
            }
        }

      /* We used to test optimize here, on the grounds that it's better to
         produce a smaller program when -O is not used.  But this causes
         such a terrible slowdown sometimes that it seems better to always
         use synth_mult.  */
      if (coeff != 0)
        {
          /* Special case powers of two.  */
          if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
            return expand_shift (LSHIFT_EXPR, mode, op0,
                                 build_int_cst (NULL_TREE, floor_log2 (coeff)),
                                 target, unsignedp);

          /* Exclude cost of op0 from max_cost to match the cost
             calculation of the synth_mult.  */
          max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed);
          if (choose_mult_variant (mode, coeff, &algorithm, &variant,
                                   max_cost))
            return expand_mult_const (mode, op0, coeff, target,
                                      &algorithm, variant);
        }
    }

  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      rtx temp = op0;
      op0 = op1;
      op1 = temp;
    }

  /* Expand x*2.0 as x+x.  */
  if (GET_CODE (op1) == CONST_DOUBLE
      && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);

      if (REAL_VALUES_EQUAL (d, dconst2))
        {
          op0 = force_reg (GET_MODE (op0), op0);
          return expand_binop (mode, add_optab, op0, op0,
                               target, unsignedp, OPTAB_LIB_WIDEN);
        }
    }

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode,
                      ! unsignedp
                      && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
                      ? smulv_optab : smul_optab,
                      op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
  gcc_assert (op0);
  return op0;
}
/* Perform a widening multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).
   THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
   or smul_widen_optab.

   We check specially for a constant integer as OP1, comparing the
   cost of a widening multiply against the cost of a sequence of shifts
   and adds.  */

rtx
expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                      int unsignedp, optab this_optab)
{
  bool speed = optimize_insn_for_speed_p ();

  if (CONST_INT_P (op1)
      && (INTVAL (op1) >= 0
          || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT))
    {
      HOST_WIDE_INT coeff = INTVAL (op1);
      int max_cost;
      enum mult_variant variant;
      struct algorithm algorithm;

      /* Special case powers of two.  */
      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
        {
          op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
          return expand_shift (LSHIFT_EXPR, mode, op0,
                               build_int_cst (NULL_TREE, floor_log2 (coeff)),
                               target, unsignedp);
        }

      /* Exclude cost of op0 from max_cost to match the cost
         calculation of the synth_mult.  */
      max_cost = mul_widen_cost[speed][mode];
      if (choose_mult_variant (mode, coeff, &algorithm, &variant,
                               max_cost))
        {
          op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
          return expand_mult_const (mode, op0, coeff, target,
                                    &algorithm, variant);
        }
    }
  return expand_binop (mode, this_optab, op0, op1, target,
                       unsignedp, OPTAB_LIB_WIDEN);
}
/* Return the smallest n such that 2**n >= X.  */

int
ceil_log2 (unsigned HOST_WIDE_INT x)
{
  return floor_log2 (x - 1) + 1;
}
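/* E.g. ceil_log2 (4) == 2 and ceil_log2 (5) == 3; for x == 1 the result
   is floor_log2 (0) + 1 == -1 + 1 == 0.  */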
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose
   multiplier more freely.

   The rounded-up logarithm of D is placed in *lgup_ptr.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */

static
unsigned HOST_WIDE_INT
choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
                   rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
{
  HOST_WIDE_INT mhigh_hi, mlow_hi;
  unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
  int lgup, post_shift;
  int pow, pow2;
  unsigned HOST_WIDE_INT nl, dummy1;
  HOST_WIDE_INT nh, dummy2;

  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  gcc_assert (lgup <= n);

  pow = n + lgup;
  pow2 = n + lgup - precision;

  /* We could handle this with some effort, but this case is much
     better handled directly with a scc insn, so rely on caller using
     that.  */
  gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);

  /* mlow = 2^(N + lgup)/d */
  if (pow >= HOST_BITS_PER_WIDE_INT)
    {
      nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
      nl = 0;
    }
  else
    {
      nh = 0;
      nl = (unsigned HOST_WIDE_INT) 1 << pow;
    }
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
                        &mlow_lo, &mlow_hi, &dummy1, &dummy2);

  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
  if (pow2 >= HOST_BITS_PER_WIDE_INT)
    nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
  else
    nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
                        &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);

  gcc_assert (!mhigh_hi || nh - d < d);
  gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
  /* Assert that mlow < mhigh.  */
  gcc_assert (mlow_hi < mhigh_hi
              || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms.  */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
      unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
      if (ml_lo >= mh_lo)
        break;

      mlow_hi = 0;
      mlow_lo = ml_lo;
      mhigh_hi = 0;
      mhigh_lo = mh_lo;
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
      *multiplier_ptr = GEN_INT (mhigh_lo & mask);
      return mhigh_lo >= mask;
    }
  else
    {
      *multiplier_ptr = GEN_INT (mhigh_lo);
      return mhigh_hi;
    }
}
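/* For the common case d = 3, n = precision = 32 this chooses the
   multiplier 0xaaaaaaab with *post_shift_ptr = 1, i.e.
   x / 3 == (x * 0xaaaaaaab) >> 32 >> 1 for all 32-bit unsigned x,
   since 0xaaaaaaab == (2^33 + 1) / 3.  */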
/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */

static unsigned HOST_WIDE_INT
invert_mod2n (unsigned HOST_WIDE_INT x, int n)
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;
  int nbit = 3;

  mask = (n == HOST_BITS_PER_WIDE_INT
          ? ~(unsigned HOST_WIDE_INT) 0
          : ((unsigned HOST_WIDE_INT) 1 << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;		/* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
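/* Each step is a Newton iteration on 1/x, so the count of correct
   low-order bits doubles from 3 to 6, 12, 24 and finally 48 >= 32.
   For example, invert_mod2n (3, 32) converges to 0xaaaaaaab, and
   indeed 3 * 0xaaaaaaab == 0x200000001, which is 1 modulo 2^32.  */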
/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
   flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.

   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */

rtx
expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
                             rtx op1, rtx target, int unsignedp)
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
                      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
                      NULL_RTX, 0);
  tem = expand_and (mode, tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
                     adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
                      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
                      NULL_RTX, 0);
  tem = expand_and (mode, tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
                          target);

  return target;
}
/* Subroutine of expand_mult_highpart.  Return the MODE high part of OP.  */

static rtx
extract_high_half (enum machine_mode mode, rtx op)
{
  enum machine_mode wider_mode;

  if (mode == word_mode)
    return gen_highpart (mode, op);

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  op = expand_shift (RSHIFT_EXPR, wider_mode, op,
                     build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
  return convert_modes (mode, wider_mode, op, 0);
}
/* Like expand_mult_highpart, but only consider using a multiplication
   optab.  OP1 is an rtx for the constant operand.  */

static rtx
expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
                            rtx target, int unsignedp, int max_cost)
{
  rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
  enum machine_mode wider_mode;
  optab moptab;
  rtx tem;
  int size;
  bool speed = optimize_insn_for_speed_p ();

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  size = GET_MODE_BITSIZE (mode);

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost[speed][mode] < max_cost)
    {
      moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
                          unsignedp, OPTAB_DIRECT);
      if (tem)
        return tem;
    }

  /* Secondly, same as above, but use sign flavor opposite of unsignedp.
     Need to adjust the result after the multiplication.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost[speed][mode] + 2 * shift_cost[speed][mode][size-1]
          + 4 * add_cost[speed][mode] < max_cost))
    {
      moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
                          unsignedp, OPTAB_DIRECT);
      if (tem)
        /* We used the wrong signedness.  Adjust the result.  */
        return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
                                            tem, unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
      && mul_widen_cost[speed][wider_mode] < max_cost)
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
                          unsignedp, OPTAB_WIDEN);
      if (tem)
        return extract_high_half (mode, tem);
    }

  /* Try widening the mode and perform a non-widening multiplication.  */
  if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && mul_cost[speed][wider_mode] + shift_cost[speed][mode][size-1] < max_cost)
    {
      rtx insns, wop0, wop1;

      /* We need to widen the operands, for example to ensure the
         constant multiplier is correctly sign or zero extended.
         Use a sequence to clean-up any instructions emitted by
         the conversions if things don't work out.  */
      start_sequence ();
      wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
      wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
      tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
                          unsignedp, OPTAB_WIDEN);
      insns = get_insns ();
      end_sequence ();

      if (tem)
        {
          emit_insn (insns);
          return extract_high_half (mode, tem);
        }
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost[speed][wider_mode] + 2 * shift_cost[speed][mode][size-1]
          + 4 * add_cost[speed][mode] < max_cost))
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
                          NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
        {
          tem = extract_high_half (mode, tem);
          /* We used the wrong signedness.  Adjust the result.  */
          return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
                                              target, unsignedp);
        }
    }

  return 0;
}
/* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
   putting the high half of the result in TARGET if that is convenient,
   and return where the result is.  If the operation can not be performed,
   0 is returned.

   MODE is the mode of operation and result.

   UNSIGNEDP nonzero means unsigned multiply.

   MAX_COST is the total allowed cost for the expanded RTL.  */

static rtx
expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
                      rtx target, int unsignedp, int max_cost)
{
  enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
  unsigned HOST_WIDE_INT cnst1;
  int extra_cost;
  bool sign_adjust = false;
  enum mult_variant variant;
  struct algorithm alg;
  rtx tem;
  bool speed = optimize_insn_for_speed_p ();

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
  /* We can't support modes wider than HOST_BITS_PER_INT.  */
  gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);

  cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);

  /* We can't optimize modes wider than BITS_PER_WORD.
     ??? We might be able to perform double-word arithmetic if
     mode == word_mode, however all the cost calculations in
     synth_mult etc. assume single-word operations.  */
  if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
    return expand_mult_highpart_optab (mode, op0, op1, target,
                                       unsignedp, max_cost);

  extra_cost = shift_cost[speed][mode][GET_MODE_BITSIZE (mode) - 1];

  /* Check whether we try to multiply by a negative constant.  */
  if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
    {
      sign_adjust = true;
      extra_cost += add_cost[speed][mode];
    }

  /* See whether shift/add multiplication is cheap enough.  */
  if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
                           max_cost - extra_cost))
    {
      /* See whether the specialized multiplication optabs are
         cheaper than the shift/add version.  */
      tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
                                        alg.cost.cost + extra_cost);
      if (tem)
        return tem;

      tem = convert_to_mode (wider_mode, op0, unsignedp);
      tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
      tem = extract_high_half (mode, tem);

      /* Adjust result for signedness.  */
      if (sign_adjust)
        tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);

      return tem;
    }
  return expand_mult_highpart_optab (mode, op0, op1, target,
                                     unsignedp, max_cost);
}
/* Expand signed modulus of OP0 by a power of two D in mode MODE.  */

static rtx
expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  unsigned HOST_WIDE_INT masklow, maskhigh;
  rtx result, temp, shift, label;
  int logd;

  logd = floor_log2 (d);
  result = gen_reg_rtx (mode);

  /* Avoid conditional branches when they're expensive.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
      && optimize_insn_for_speed_p ())
    {
      rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
                                      mode, 0, -1);
      if (signmask)
        {
          signmask = force_reg (mode, signmask);
          masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
          shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);

          /* Use the rtx_cost of a LSHIFTRT instruction to determine
             which instruction sequence to use.  If logical right shifts
             are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
             use a LSHIFTRT, 1 ADD, 1 SUB and an AND.  */

          temp = gen_rtx_LSHIFTRT (mode, result, shift);
          if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
              || rtx_cost (temp, SET, optimize_insn_for_speed_p ()) > COSTS_N_INSNS (2))
            {
              temp = expand_binop (mode, xor_optab, op0, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, sub_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, xor_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, sub_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
            }
          else
            {
              signmask = expand_binop (mode, lshr_optab, signmask, shift,
                                       NULL_RTX, 1, OPTAB_LIB_WIDEN);
              signmask = force_reg (mode, signmask);

              temp = expand_binop (mode, add_optab, op0, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, sub_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
            }
          return temp;
        }
    }

  /* Mask contains the mode's signbit and the significant bits of the
     modulus.  By including the signbit in the operation, many targets
     can avoid an explicit compare operation in the following comparison
     against zero.  */

  masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
  if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
      maskhigh = -1;
    }
  else
    maskhigh = (HOST_WIDE_INT) -1
                 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);

  temp = expand_binop (mode, and_optab, op0,
                       immed_double_const (masklow, maskhigh, mode),
                       result, 1, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  label = gen_label_rtx ();
  do_cmp_and_jump (result, const0_rtx, GE, mode, label);

  temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
                       0, OPTAB_LIB_WIDEN);
  masklow = (HOST_WIDE_INT) -1 << logd;
  maskhigh = -1;
  temp = expand_binop (mode, ior_optab, temp,
                       immed_double_const (masklow, maskhigh, mode),
                       result, 1, OPTAB_LIB_WIDEN);
  temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
                       0, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);
  emit_label (label);
  return result;
}
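/* For d = 4 and 32-bit operands, the cheap-shift path above computes
     s = (x < 0) ? 3 : 0;  result = ((x + s) & 3) - s;
   which matches C's truncating division: for x = -5 this gives
   ((-5 + 3) & 3) - 3 == 2 - 3 == -1.  */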
/* Expand signed division of OP0 by a power of two D in mode MODE.
   This routine is only called for positive values of D.  */

static rtx
expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx temp, label;
  tree shift;
  int logd;

  logd = floor_log2 (d);
  shift = build_int_cst (NULL_TREE, logd);

  if (d == 2
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 1)
    {
      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
    }

#ifdef HAVE_conditional_move
  if (BRANCH_COST (optimize_insn_for_speed_p (), false)
      >= 2)
    {
      rtx temp2;

      /* ??? emit_conditional_move forces a stack adjustment via
         compare_from_rtx so, if the sequence is discarded, it will
         be lost.  Do it now instead.  */
      do_pending_stack_adjust ();

      start_sequence ();
      temp2 = copy_to_mode_reg (mode, op0);
      temp = expand_binop (mode, add_optab, temp2, GEN_INT (d - 1),
                           NULL_RTX, 0, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);

      /* Construct "temp2 = (temp2 < 0) ? temp : temp2".  */
      temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
                                     mode, temp, temp2, mode, 0);
      if (temp2)
        {
          rtx seq = get_insns ();
          end_sequence ();
          emit_insn (seq);
          return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
        }
      end_sequence ();
    }
#endif

  if (BRANCH_COST (optimize_insn_for_speed_p (),
                   false) >= 2)
    {
      int ushift = GET_MODE_BITSIZE (mode) - logd;

      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
      if (shift_cost[optimize_insn_for_speed_p ()][mode][ushift]
          > COSTS_N_INSNS (1))
        temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
                             NULL_RTX, 0, OPTAB_LIB_WIDEN);
      else
        temp = expand_shift (RSHIFT_EXPR, mode, temp,
                             build_int_cst (NULL_TREE, ushift),
                             NULL_RTX, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
    }

  label = gen_label_rtx ();
  temp = copy_to_mode_reg (mode, op0);
  do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
  expand_inc (temp, GEN_INT (d - 1));
  emit_label (label);
  return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
}
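
/* A small illustrative sketch (under "#if 0", per this file's convention)
   of the branch-free sequence expand_sdiv_pow2 prefers for signed
   "x / (1 << k)" rounding toward zero: bias negative operands by d - 1
   before the arithmetic shift.  int32_t and an arithmetic ">>" on signed
   values are assumptions of the example only.  */
#if 0
#include <stdint.h>

static int32_t
sdiv_pow2_sketch (int32_t x, int k)
{
  /* "x >> 31" is the -1/0 mask from emit_store_flag; AND-ing it with
     d - 1 yields the bias, as in the BRANCH_COST >= 2 path above.  */
  int32_t bias = (x >> 31) & (((int32_t) 1 << k) - 1);
  return (x + bias) >> k;
}
#endif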
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   left for later.  */

/* ??? For modulo, we don't actually need the highpart of the first product,
   the low part will do nicely.  And for small divisors, the second multiply
   can also be a low-part only multiply or even be completely left out.
   E.g. to calculate the remainder of a division by 3 with a 32 bit
   multiply, multiply with 0x55555556 and extract the upper two bits;
   the result is exact for inputs up to 0x1fffffff.
   The input range can be reduced by using cross-sum rules.
   For odd divisors >= 3, the following table gives right shift counts
   so that if a number is shifted by an integer multiple of the given
   amount, the remainder stays the same:
   2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
   14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
   0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
   20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
   0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12

   Cross-sum rules for even numbers can be derived by leaving as many bits
   to the right alone as the divisor has zeros to the right.
   E.g. if x is an unsigned 32 bit number:
   (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
   */
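
/* A worked instance (under "#if 0", per this file's convention) of the
   remainder-by-3 trick described in the comment above; plain uint32_t
   arithmetic is assumed.  0x55555556 is (2**32 + 2) / 3, and the
   remainder appears in the top two bits of the low 32-bit product.  */
#if 0
#include <stdint.h>

static uint32_t
rem3_sketch (uint32_t x)  /* Exact for x <= 0x1fffffff, as noted above.  */
{
  return (uint32_t) (x * 0x55555556u) >> 30;
}
#endif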
rtx
expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
               rtx op0, rtx op1, rtx target, int unsignedp)
{
  enum machine_mode compute_mode;
  rtx tquotient;
  rtx quotient = 0, remainder = 0;
  rtx last;
  int size;
  rtx insn, set;
  optab optab1, optab2;
  int op1_is_constant, op1_is_pow2 = 0;
  int max_cost, extra_cost;
  static HOST_WIDE_INT last_div_const = 0;
  static HOST_WIDE_INT ext_op1;
  bool speed = optimize_insn_for_speed_p ();

  op1_is_constant = CONST_INT_P (op1);
  if (op1_is_constant)
    {
      ext_op1 = INTVAL (op1);
      if (unsignedp)
        ext_op1 &= GET_MODE_MASK (mode);
      op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
                     || (! unsignedp
                         && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
    }

  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding mode.
     For some special operands this code emits all RTL for the desired
     operation, for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
     by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expand_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */
  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;

  /* When dividing by -1, we could get an overflow.
     negv_optab can handle overflows.  */
  if (! unsignedp && op1 == constm1_rtx)
    {
      if (rem_flag)
        return const0_rtx;
      return expand_unop (mode,
                          flag_trapv && GET_MODE_CLASS (mode) == MODE_INT
                          ? negv_optab : neg_optab, op0, target, 0);
    }

  if (target
      /* Don't use the function value register as a target
         since we have to read it as well as write it,
         and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
          /* Don't clobber an operand while doing a multi-step calculation.  */
          || ((rem_flag || op1_is_constant)
              && (reg_mentioned_p (target, op0)
                  || (MEM_P (op0) && MEM_P (target))))
          || reg_mentioned_p (target, op1)
          || (MEM_P (op1) && MEM_P (target))))
    target = 0;

  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions that modxx3 insns and that these insns don't normally
     fail.  If these assumptions are not correct, we may generate less
     efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expand_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */

  optab1 = ((op1_is_pow2 && op1 != const0_rtx)
            ? (unsignedp ? lshr_optab : ashr_optab)
            : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = ((op1_is_pow2 && op1 != const0_rtx)
            ? optab1
            : (unsignedp ? udivmod_optab : sdivmod_optab));

  for (compute_mode = mode; compute_mode != VOIDmode;
       compute_mode = GET_MODE_WIDER_MODE (compute_mode))
    if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
        || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    for (compute_mode = mode; compute_mode != VOIDmode;
         compute_mode = GET_MODE_WIDER_MODE (compute_mode))
      if (optab_libfunc (optab1, compute_mode)
          || optab_libfunc (optab2, compute_mode))
        break;

  /* If we still couldn't find a mode, use MODE, but expand_binop will
     probably die.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;

  if (target && GET_MODE (target) == compute_mode)
    tquotient = target;
  else
    tquotient = gen_reg_rtx (compute_mode);

  size = GET_MODE_BITSIZE (compute_mode);
#if 0
  /* It should be possible to restrict the precision to GET_MODE_BITSIZE
     (mode), and thereby get better code when OP1 is a constant.  Do that
     later.  It will require going over all usages of SIZE below.  */
  size = GET_MODE_BITSIZE (mode);
#endif

  /* Only deduct something for a REM if the last divide done was
     for a different constant.  Then set the constant of the last
     divide.  */
  max_cost = (unsignedp
              ? udiv_cost[speed][compute_mode]
              : sdiv_cost[speed][compute_mode]);
  if (rem_flag && ! (last_div_const != 0 && op1_is_constant
                     && INTVAL (op1) == last_div_const))
    max_cost -= (mul_cost[speed][compute_mode]
                 + add_cost[speed][compute_mode]);

  last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;

  /* Now convert to the best mode to use.  */
  if (compute_mode != mode)
    {
      op0 = convert_modes (compute_mode, mode, op0, unsignedp);
      op1 = convert_modes (compute_mode, mode, op1, unsignedp);

      /* convert_modes may have placed op1 into a register, so we
         must recompute the following.  */
      op1_is_constant = CONST_INT_P (op1);
      op1_is_pow2 = (op1_is_constant
                     && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                          || (! unsignedp
                              && EXACT_POWER_OF_2_OR_ZERO_P
                                   (-INTVAL (op1))))));
    }

  /* If one of the operands is a volatile MEM, copy it into a register.  */

  if (MEM_P (op0) && MEM_VOLATILE_P (op0))
    op0 = force_reg (compute_mode, op0);
  if (MEM_P (op1) && MEM_VOLATILE_P (op1))
    op1 = force_reg (compute_mode, op1);

  /* If we need the remainder or if OP1 is constant, we need to
     put OP0 in a register in case it has any queued subexpressions.  */
  if (rem_flag || op1_is_constant)
    op0 = force_reg (compute_mode, op0);

  last = get_last_insn ();

  /* Promote floor rounding to trunc rounding for unsigned operations.  */
  if (unsignedp)
    {
      if (code == FLOOR_DIV_EXPR)
        code = TRUNC_DIV_EXPR;
      if (code == FLOOR_MOD_EXPR)
        code = TRUNC_MOD_EXPR;
      if (code == EXACT_DIV_EXPR && op1_is_pow2)
        code = TRUNC_DIV_EXPR;
    }

  if (op1 != const0_rtx)
    switch (code)
      {
      case TRUNC_MOD_EXPR:
      case TRUNC_DIV_EXPR:
        if (op1_is_constant)
          {
            if (unsignedp)
              {
                unsigned HOST_WIDE_INT mh;
                int pre_shift, post_shift;
                int dummy;
                rtx ml;
                unsigned HOST_WIDE_INT d = (INTVAL (op1)
                                            & GET_MODE_MASK (compute_mode));

                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        remainder
                          = expand_binop (compute_mode, and_optab, op0,
                                          GEN_INT (((HOST_WIDE_INT) 1
                                                    << pre_shift) - 1),
                                          remainder, 1,
                                          OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                             build_int_cst (NULL_TREE,
                                                            pre_shift),
                                             tquotient, 1);
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
                      {
                        /* Most significant bit of divisor is set; emit an scc
                           insn.  */
                        quotient = emit_store_flag_force (tquotient, GEU,
                                                          op0, op1,
                                                          compute_mode, 1, 1);
                      }
                    else
                      {
                        /* Find a suitable multiplier and right shift count
                           instead of multiplying with D.  */

                        mh = choose_multiplier (d, size, size,
                                                &ml, &post_shift, &dummy);

                        /* If the suggested multiplier is more than SIZE bits,
                           we can do better for even divisors, using an
                           initial right shift.  */
                        if (mh != 0 && (d & 1) == 0)
                          {
                            pre_shift = floor_log2 (d & -d);
                            mh = choose_multiplier (d >> pre_shift, size,
                                                    size - pre_shift,
                                                    &ml, &post_shift, &dummy);
                            gcc_assert (!mh);
                          }
                        else
                          pre_shift = 0;

                        if (mh != 0)
                          {
                            rtx t1, t2, t3, t4;

                            if (post_shift - 1 >= BITS_PER_WORD)
                              goto fail1;

                            extra_cost
                              = (shift_cost[speed][compute_mode][post_shift - 1]
                                 + shift_cost[speed][compute_mode][1]
                                 + 2 * add_cost[speed][compute_mode]);
                            t1 = expand_mult_highpart (compute_mode, op0, ml,
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
                            if (t1 == 0)
                              goto fail1;
                            t2 = force_operand (gen_rtx_MINUS (compute_mode,
                                                               op0, t1),
                                                NULL_RTX);
                            t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
                                               integer_one_node, NULL_RTX, 1);
                            t4 = force_operand (gen_rtx_PLUS (compute_mode,
                                                              t1, t3),
                                                NULL_RTX);
                            quotient = expand_shift
                              (RSHIFT_EXPR, compute_mode, t4,
                               build_int_cst (NULL_TREE, post_shift - 1),
                               tquotient, 1);
                          }
                        else
                          {
                            rtx t1, t2;

                            if (pre_shift >= BITS_PER_WORD
                                || post_shift >= BITS_PER_WORD)
                              goto fail1;

                            t1 = expand_shift
                              (RSHIFT_EXPR, compute_mode, op0,
                               build_int_cst (NULL_TREE, pre_shift),
                               NULL_RTX, 1);
                            extra_cost
                              = (shift_cost[speed][compute_mode][pre_shift]
                                 + shift_cost[speed][compute_mode][post_shift]);
                            t2 = expand_mult_highpart (compute_mode, t1, ml,
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
                            if (t2 == 0)
                              goto fail1;
                            quotient = expand_shift
                              (RSHIFT_EXPR, compute_mode, t2,
                               build_int_cst (NULL_TREE, post_shift),
                               tquotient, 1);
                          }
                      }
                  }
                else		/* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last
                    && (set = single_set (insn)) != 0
                    && SET_DEST (set) == quotient)
                  set_unique_reg_note (insn,
                                       REG_EQUAL,
                                       gen_rtx_UDIV (compute_mode, op0, op1));
              }
            else		/* TRUNC_DIV, signed */
              {
                unsigned HOST_WIDE_INT ml;
                int lgup, post_shift;
                rtx mlr;
                HOST_WIDE_INT d = INTVAL (op1);
                unsigned HOST_WIDE_INT abs_d;

                /* Since d might be INT_MIN, we have to cast to
                   unsigned HOST_WIDE_INT before negating to avoid
                   undefined signed overflow.  */
                abs_d = (d >= 0
                         ? (unsigned HOST_WIDE_INT) d
                         : - (unsigned HOST_WIDE_INT) d);

                /* n rem d = n rem -d */
                if (rem_flag && d < 0)
                  {
                    d = abs_d;
                    op1 = gen_int_mode (abs_d, compute_mode);
                  }

                if (d == 1)
                  quotient = op0;
                else if (d == -1)
                  quotient = expand_unop (compute_mode, neg_optab, op0,
                                          tquotient, 0);
                else if (HOST_BITS_PER_WIDE_INT >= size
                         && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
                  {
                    /* This case is not handled correctly below.  */
                    quotient = emit_store_flag (tquotient, EQ, op0, op1,
                                                compute_mode, 1, 1);
                    if (quotient == 0)
                      goto fail1;
                  }
                else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
                         && (rem_flag
                             ? smod_pow2_cheap[speed][compute_mode]
                             : sdiv_pow2_cheap[speed][compute_mode])
                         /* We assume that cheap metric is true if the
                            optab has an expander for this mode.  */
                         && ((optab_handler ((rem_flag ? smod_optab
                                              : sdiv_optab),
                                             compute_mode)
                              != CODE_FOR_nothing)
                             || (optab_handler (sdivmod_optab,
                                                compute_mode)
                                 != CODE_FOR_nothing)))
                  ;
                else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
                  {
                    if (rem_flag)
                      {
                        remainder = expand_smod_pow2 (compute_mode, op0, d);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }

                    if (sdiv_pow2_cheap[speed][compute_mode]
                        && ((optab_handler (sdiv_optab, compute_mode)
                             != CODE_FOR_nothing)
                            || (optab_handler (sdivmod_optab, compute_mode)
                                != CODE_FOR_nothing)))
                      quotient = expand_divmod (0, TRUNC_DIV_EXPR,
                                                compute_mode, op0,
                                                gen_int_mode (abs_d,
                                                              compute_mode),
                                                NULL_RTX, 0);
                    else
                      quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);

                    /* We have computed OP0 / abs(OP1).  If OP1 is negative,
                       negate the quotient.  */
                    if (d < 0)
                      {
                        insn = get_last_insn ();
                        if (insn != last
                            && (set = single_set (insn)) != 0
                            && SET_DEST (set) == quotient
                            && abs_d < ((unsigned HOST_WIDE_INT) 1
                                        << (HOST_BITS_PER_WIDE_INT - 1)))
                          set_unique_reg_note (insn,
                                               REG_EQUAL,
                                               gen_rtx_DIV (compute_mode,
                                                            op0,
                                                            GEN_INT
                                                              (trunc_int_for_mode
                                                                (abs_d,
                                                                 compute_mode))));

                        quotient = expand_unop (compute_mode, neg_optab,
                                                quotient, quotient, 0);
                      }
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    choose_multiplier (abs_d, size, size - 1,
                                       &mlr, &post_shift, &lgup);
                    ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
                    if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
                      {
                        rtx t1, t2, t3;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        extra_cost = (shift_cost[speed][compute_mode][post_shift]
                                      + shift_cost[speed][compute_mode][size - 1]
                                      + add_cost[speed][compute_mode]);
                        t1 = expand_mult_highpart (compute_mode, op0, mlr,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = expand_shift
                          (RSHIFT_EXPR, compute_mode, t1,
                           build_int_cst (NULL_TREE, post_shift),
                           NULL_RTX, 0);
                        t3 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           build_int_cst (NULL_TREE, size - 1),
                           NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t2),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t2, t3),
                                             tquotient);
                      }
                    else
                      {
                        rtx t1, t2, t3, t4;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
                        mlr = gen_int_mode (ml, compute_mode);
                        extra_cost = (shift_cost[speed][compute_mode][post_shift]
                                      + shift_cost[speed][compute_mode][size - 1]
                                      + 2 * add_cost[speed][compute_mode]);
                        t1 = expand_mult_highpart (compute_mode, op0, mlr,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, op0),
                                            NULL_RTX);
                        t3 = expand_shift
                          (RSHIFT_EXPR, compute_mode, t2,
                           build_int_cst (NULL_TREE, post_shift),
                           NULL_RTX, 0);
                        t4 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           build_int_cst (NULL_TREE, size - 1),
                           NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t4, t3),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t4),
                                             tquotient);
                      }
                  }
                else		/* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last
                    && (set = single_set (insn)) != 0
                    && SET_DEST (set) == quotient)
                  set_unique_reg_note (insn,
                                       REG_EQUAL,
                                       gen_rtx_DIV (compute_mode, op0, op1));
              }
            break;
          }

      fail1:
        delete_insns_since (last);
        break;
      case FLOOR_DIV_EXPR:
      case FLOOR_MOD_EXPR:
        /* We will come here only for signed operations.  */
        if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
          {
            unsigned HOST_WIDE_INT mh;
            int pre_shift, lgup, post_shift;
            HOST_WIDE_INT d = INTVAL (op1);
            rtx ml;

            if (d > 0)
              {
                /* We could just as easily deal with negative constants here,
                   but it does not seem worth the trouble for GCC 2.6.  */
                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        remainder = expand_binop (compute_mode, and_optab, op0,
                                                  GEN_INT (((HOST_WIDE_INT) 1
                                                            << pre_shift) - 1),
                                                  remainder, 0,
                                                  OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift
                      (RSHIFT_EXPR, compute_mode, op0,
                       build_int_cst (NULL_TREE, pre_shift),
                       tquotient, 0);
                  }
                else
                  {
                    rtx t1, t2, t3, t4;

                    mh = choose_multiplier (d, size, size - 1,
                                            &ml, &post_shift, &lgup);
                    gcc_assert (!mh);

                    if (post_shift < BITS_PER_WORD
                        && size - 1 < BITS_PER_WORD)
                      {
                        t1 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           build_int_cst (NULL_TREE, size - 1),
                           NULL_RTX, 0);
                        t2 = expand_binop (compute_mode, xor_optab, op0, t1,
                                           NULL_RTX, 0, OPTAB_WIDEN);
                        extra_cost = (shift_cost[speed][compute_mode][post_shift]
                                      + shift_cost[speed][compute_mode][size - 1]
                                      + 2 * add_cost[speed][compute_mode]);
                        t3 = expand_mult_highpart (compute_mode, t2, ml,
                                                   NULL_RTX, 1,
                                                   max_cost - extra_cost);
                        if (t3 != 0)
                          {
                            t4 = expand_shift
                              (RSHIFT_EXPR, compute_mode, t3,
                               build_int_cst (NULL_TREE, post_shift),
                               NULL_RTX, 1);
                            quotient = expand_binop (compute_mode, xor_optab,
                                                     t4, t1, tquotient, 0,
                                                     OPTAB_WIDEN);
                          }
                      }
                  }
              }
            else
              {
                rtx nsign, t1, t2, t3, t4;
                t1 = force_operand (gen_rtx_PLUS (compute_mode,
                                                  op0, constm1_rtx), NULL_RTX);
                t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
                                   0, OPTAB_WIDEN);
                nsign = expand_shift
                  (RSHIFT_EXPR, compute_mode, t2,
                   build_int_cst (NULL_TREE, size - 1),
                   NULL_RTX, 0);
                t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
                                    NULL_RTX);
                t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
                                    NULL_RTX, 0);
                if (t4)
                  {
                    rtx t5;
                    t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
                                      NULL_RTX, 0);
                    quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                            t4, t5),
                                              tquotient);
                  }
              }
          }

        if (quotient != 0)
          break;
        delete_insns_since (last);

        /* Try using an instruction that produces both the quotient and
           remainder, using truncation.  We can easily compensate the quotient
           or remainder to get floor rounding, once we have the remainder.
           Notice that we compute also the final remainder value here,
           and return the result right away.  */
        if (target == 0 || GET_MODE (target) != compute_mode)
          target = gen_reg_rtx (compute_mode);

        if (rem_flag)
          {
            remainder
              = REG_P (target) ? target : gen_reg_rtx (compute_mode);
            quotient = gen_reg_rtx (compute_mode);
          }
        else
          {
            quotient
              = REG_P (target) ? target : gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
          }

        if (expand_twoval_binop (sdivmod_optab, op0, op1,
                                 quotient, remainder, 0))
          {
            /* This could be computed with a branch-less sequence.
               Save that for later.  */
            rtx tem;
            rtx label = gen_label_rtx ();
            do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
            tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
            expand_dec (quotient, const1_rtx);
            expand_inc (remainder, op1);
            emit_label (label);
            return gen_lowpart (mode, rem_flag ? remainder : quotient);
          }

        /* No luck with division elimination or divmod.  Have to do it
           by conditionally adjusting op0 *and* the result.  */
        {
          rtx label1, label2, label3, label4, label5;
          rtx adjusted_op0;
          rtx tem;

          quotient = gen_reg_rtx (compute_mode);
          adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
          label1 = gen_label_rtx ();
          label2 = gen_label_rtx ();
          label3 = gen_label_rtx ();
          label4 = gen_label_rtx ();
          label5 = gen_label_rtx ();
          do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (gen_jump (label5));
          emit_barrier ();
          emit_label (label1);
          expand_inc (adjusted_op0, const1_rtx);
          emit_jump_insn (gen_jump (label4));
          emit_barrier ();
          emit_label (label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (gen_jump (label5));
          emit_barrier ();
          emit_label (label3);
          expand_dec (adjusted_op0, const1_rtx);
          emit_label (label4);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          expand_dec (quotient, const1_rtx);
          emit_label (label5);
        }
        break;
      case CEIL_DIV_EXPR:
      case CEIL_MOD_EXPR:
        if (unsignedp)
          {
            if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
              {
                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   build_int_cst (NULL_TREE, floor_log2 (d)),
                                   tquotient, 1);
                t2 = expand_binop (compute_mode, and_optab, op0,
                                   GEN_INT (d - 1),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (compute_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                      compute_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, t3),
                                            tquotient);
                break;
              }

            /* Try using an instruction that produces both the quotient and
               remainder, using truncation.  We can easily compensate the
               quotient or remainder to get ceiling rounding, once we have the
               remainder.  Notice that we compute also the final remainder
               value here, and return the result right away.  */
            if (target == 0 || GET_MODE (target) != compute_mode)
              target = gen_reg_rtx (compute_mode);

            if (rem_flag)
              {
                remainder = (REG_P (target)
                             ? target : gen_reg_rtx (compute_mode));
                quotient = gen_reg_rtx (compute_mode);
              }
            else
              {
                quotient = (REG_P (target)
                            ? target : gen_reg_rtx (compute_mode));
                remainder = gen_reg_rtx (compute_mode);
              }

            if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
                                     remainder, 1))
              {
                /* This could be computed with a branch-less sequence.
                   Save that for later.  */
                rtx label = gen_label_rtx ();
                do_cmp_and_jump (remainder, const0_rtx, EQ,
                                 compute_mode, label);
                expand_inc (quotient, const1_rtx);
                expand_dec (remainder, op1);
                emit_label (label);
                return gen_lowpart (mode, rem_flag ? remainder : quotient);
              }

            /* No luck with division elimination or divmod.  Have to do it
               by conditionally adjusting op0 *and* the result.  */
            {
              rtx label1, label2;
              rtx adjusted_op0, tem;

              quotient = gen_reg_rtx (compute_mode);
              adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
              label1 = gen_label_rtx ();
              label2 = gen_label_rtx ();
              do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
                               compute_mode, label1);
              emit_move_insn (quotient, const0_rtx);
              emit_jump_insn (gen_jump (label2));
              emit_barrier ();
              emit_label (label1);
              expand_dec (adjusted_op0, const1_rtx);
              tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
                                  quotient, 1, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              expand_inc (quotient, const1_rtx);
              emit_label (label2);
            }
          }
        else /* signed */
          {
            if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                && INTVAL (op1) >= 0)
              {
                /* This is extremely similar to the code for the unsigned case
                   above.  For 2.7 we should merge these variants, but for
                   2.6.1 I don't want to touch the code for unsigned since that
                   get used in C.  The signed case will only be used by other
                   languages (Ada).  */

                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   build_int_cst (NULL_TREE, floor_log2 (d)),
                                   NULL_RTX, 0);
                t2 = expand_binop (compute_mode, and_optab, op0,
                                   GEN_INT (d - 1),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (compute_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                      compute_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, t3),
                                            tquotient);
                break;
              }

            /* Try using an instruction that produces both the quotient and
               remainder, using truncation.  We can easily compensate the
               quotient or remainder to get ceiling rounding, once we have the
               remainder.  Notice that we compute also the final remainder
               value here, and return the result right away.  */
            if (target == 0 || GET_MODE (target) != compute_mode)
              target = gen_reg_rtx (compute_mode);

            if (rem_flag)
              {
                remainder = (REG_P (target)
                             ? target : gen_reg_rtx (compute_mode));
                quotient = gen_reg_rtx (compute_mode);
              }
            else
              {
                quotient = (REG_P (target)
                            ? target : gen_reg_rtx (compute_mode));
                remainder = gen_reg_rtx (compute_mode);
              }

            if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
                                     remainder, 0))
              {
                /* This could be computed with a branch-less sequence.
                   Save that for later.  */
                rtx tem;
                rtx label = gen_label_rtx ();
                do_cmp_and_jump (remainder, const0_rtx, EQ,
                                 compute_mode, label);
                tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                    NULL_RTX, 0, OPTAB_WIDEN);
                do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
                expand_inc (quotient, const1_rtx);
                expand_dec (remainder, op1);
                emit_label (label);
                return gen_lowpart (mode, rem_flag ? remainder : quotient);
              }

            /* No luck with division elimination or divmod.  Have to do it
               by conditionally adjusting op0 *and* the result.  */
            {
              rtx label1, label2, label3, label4, label5;
              rtx adjusted_op0;
              rtx tem;

              quotient = gen_reg_rtx (compute_mode);
              adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
              label1 = gen_label_rtx ();
              label2 = gen_label_rtx ();
              label3 = gen_label_rtx ();
              label4 = gen_label_rtx ();
              label5 = gen_label_rtx ();
              do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
                               compute_mode, label1);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label1);
              expand_dec (adjusted_op0, const1_rtx);
              emit_jump_insn (gen_jump (label4));
              emit_barrier ();
              emit_label (label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
                               compute_mode, label3);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label3);
              expand_inc (adjusted_op0, const1_rtx);
              emit_label (label4);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              expand_inc (quotient, const1_rtx);
              emit_label (label5);
            }
          }
        break;
      case EXACT_DIV_EXPR:
        if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
          {
            HOST_WIDE_INT d = INTVAL (op1);
            unsigned HOST_WIDE_INT ml;
            int pre_shift;
            rtx t1;

            pre_shift = floor_log2 (d & -d);
            ml = invert_mod2n (d >> pre_shift, size);
            t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                               build_int_cst (NULL_TREE, pre_shift),
                               NULL_RTX, unsignedp);
            quotient = expand_mult (compute_mode, t1,
                                    gen_int_mode (ml, compute_mode),
                                    NULL_RTX, 1);

            insn = get_last_insn ();
            set_unique_reg_note (insn,
                                 REG_EQUAL,
                                 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
                                                 compute_mode,
                                                 op0, op1));
          }
        break;
      case ROUND_DIV_EXPR:
      case ROUND_MOD_EXPR:
        if (unsignedp)
          {
            rtx tem;
            rtx label;
            label = gen_label_rtx ();
            quotient = gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
            if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
                                     remainder, 1) == 0)
              {
                quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
                                         quotient, 1, OPTAB_LIB_WIDEN);
                tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
                remainder = expand_binop (compute_mode, sub_optab, op0, tem,
                                          remainder, 1, OPTAB_LIB_WIDEN);
              }
            tem = plus_constant (op1, -1);
            tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
                                integer_one_node, NULL_RTX, 1);
            do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
            expand_inc (quotient, const1_rtx);
            expand_dec (remainder, op1);
            emit_label (label);
          }
        else
          {
            rtx abs_rem, abs_op1, tem, mask;
            rtx label;
            label = gen_label_rtx ();
            quotient = gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
            if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
                                     remainder, 0) == 0)
              {
                quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
                                         quotient, 0, OPTAB_LIB_WIDEN);
                tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
                remainder = expand_binop (compute_mode, sub_optab, op0, tem,
                                          remainder, 0, OPTAB_LIB_WIDEN);
              }
            abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
            abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
            tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
                                integer_one_node, NULL_RTX, 1);
            do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
            tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
                                 build_int_cst (NULL_TREE, size - 1),
                                 NULL_RTX, 0);
            tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_inc (quotient, tem);
            tem = expand_binop (compute_mode, xor_optab, mask, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_dec (remainder, tem);
            emit_label (label);
          }
        return gen_lowpart (mode, rem_flag ? remainder : quotient);

      default:
        gcc_unreachable ();
      }
  if (quotient == 0)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (rem_flag)
        {
          /* Try to produce the remainder without producing the quotient.
             If we seem to have a divmod pattern that does not require widening,
             don't try widening here.  We should really have a WIDEN argument
             to expand_twoval_binop, since what we'd really like to do here is
             1) try a mod insn in compute_mode
             2) try a divmod insn in compute_mode
             3) try a div insn in compute_mode and multiply-subtract to get
                remainder
             4) try the same things with widening allowed.  */
          remainder
            = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                 op0, op1, target,
                                 unsignedp,
                                 ((optab_handler (optab2, compute_mode)
                                   != CODE_FOR_nothing)
                                  ? OPTAB_DIRECT : OPTAB_WIDEN));
          if (remainder == 0)
            {
              /* No luck there.  Can we do remainder and divide at once
                 without a library call?  */
              remainder = gen_reg_rtx (compute_mode);
              if (! expand_twoval_binop ((unsignedp
                                          ? udivmod_optab
                                          : sdivmod_optab),
                                         op0, op1,
                                         NULL_RTX, remainder, unsignedp))
                remainder = 0;
            }

          if (remainder)
            return gen_lowpart (mode, remainder);
        }

      /* Produce the quotient.  Try a quotient insn, but not a library call.
         If we have a divmod in this mode, use it in preference to widening
         the div (for this test we assume it will not fail). Note that optab2
         is set to the one of the two optabs that the call below will use.  */
      quotient
        = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
                             op0, op1, rem_flag ? NULL_RTX : target,
                             unsignedp,
                             ((optab_handler (optab2, compute_mode)
                               != CODE_FOR_nothing)
                              ? OPTAB_DIRECT : OPTAB_WIDEN));

      if (quotient == 0)
        {
          /* No luck there.  Try a quotient-and-remainder insn,
             keeping the quotient alone.  */
          quotient = gen_reg_rtx (compute_mode);
          if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
                                     op0, op1,
                                     quotient, NULL_RTX, unsignedp))
            {
              /* Still no luck.  If we are not computing the remainder,
                 use a library call for the quotient.  */
              quotient = sign_expand_binop (compute_mode,
                                            udiv_optab, sdiv_optab,
                                            op0, op1, target,
                                            unsignedp, OPTAB_LIB_WIDEN);
            }
        }
    }

  if (rem_flag)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (quotient == 0)
        {
          /* No divide instruction either.  Use library for remainder.  */
          remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                         op0, op1, target,
                                         unsignedp, OPTAB_LIB_WIDEN);
          /* No remainder function.  Try a quotient-and-remainder
             function, keeping the remainder.  */
          if (remainder == 0)
            {
              remainder = gen_reg_rtx (compute_mode);
              if (!expand_twoval_binop_libfunc
                  (unsignedp ? udivmod_optab : sdivmod_optab,
                   op0, op1,
                   NULL_RTX, remainder,
                   unsignedp ? UMOD : MOD))
                remainder = NULL_RTX;
            }
        }
      else
        {
          /* We divided.  Now finish doing X - Y * (X / Y).  */
          remainder = expand_mult (compute_mode, quotient, op1,
                                   NULL_RTX, unsignedp);
          remainder = expand_binop (compute_mode, sub_optab, op0,
                                    remainder, target, unsignedp,
                                    OPTAB_LIB_WIDEN);
        }
    }

  return gen_lowpart (mode, rem_flag ? remainder : quotient);
}
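
/* Two self-contained sketches (under "#if 0", per this file's convention)
   of the by-constant division tricks expand_divmod drives above.  The
   32-bit constants are assumptions of the example, computed the way
   choose_multiplier and invert_mod2n would compute them; the helper
   names are illustrative only.  */
#if 0
#include <stdint.h>

/* TRUNC_DIV by 7.  The multiplier ceil(2**35/7) needs 33 bits, so this
   follows the "mh != 0" shape above: highpart multiply, subtract, shift
   by 1, add back, then shift by post_shift - 1 == 2.  */
static uint32_t
udiv7_sketch (uint32_t x)
{
  uint32_t t = (uint32_t) (((uint64_t) x * 0x24924925u) >> 32);
  return (t + ((x - t) >> 1)) >> 2;
}

/* EXACT_DIV by 7.  When the division is known to be exact, a single
   multiply by the inverse of 7 modulo 2**32 suffices
   (7 * 0xb6db6db7 == 5 * 2**32 + 1).  */
static uint32_t
exact_div7_sketch (uint32_t x)  /* X must be a multiple of 7.  */
{
  return x * 0xb6db6db7u;
}
#endif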
/* Return a tree node with data type TYPE, describing the value of X.
   Usually this is a VAR_DECL, if there is no obvious better choice.
   X may be an expression, however we only support those expressions
   generated by loop.c.  */

tree
make_tree (tree type, rtx x)
{
  tree t;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      {
        HOST_WIDE_INT hi = 0;

        if (INTVAL (x) < 0
            && !(TYPE_UNSIGNED (type)
                 && (GET_MODE_BITSIZE (TYPE_MODE (type))
                     < HOST_BITS_PER_WIDE_INT)))
          hi = -1;

        t = build_int_cst_wide (type, INTVAL (x), hi);

        return t;
      }

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
        t = build_int_cst_wide (type,
                                CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
      else
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, x);
          t = build_real (type, d);
        }

      return t;

    case CONST_VECTOR:
      {
        int units = CONST_VECTOR_NUNITS (x);
        tree itype = TREE_TYPE (type);
        tree t = NULL_TREE;
        int i;

        /* Build a tree with vector elements.  */
        for (i = units - 1; i >= 0; --i)
          {
            rtx elt = CONST_VECTOR_ELT (x, i);
            t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
          }

        return build_vector (type, t);
      }

    case PLUS:
      return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case MINUS:
      return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case NEG:
      return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));

    case MULT:
      return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case ASHIFT:
      return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case LSHIFTRT:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (type, XEXP (x, 1))));

    case ASHIFTRT:
      t = signed_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (type, XEXP (x, 1))));

    case DIV:
      if (TREE_CODE (type) != REAL_TYPE)
        t = signed_type_for (type);
      else
        t = type;

      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (t, XEXP (x, 1))));
    case UDIV:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (t, XEXP (x, 1))));

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
                                          GET_CODE (x) == ZERO_EXTEND);
      return fold_convert (type, make_tree (t, XEXP (x, 0)));

    case CONST:
      return make_tree (type, XEXP (x, 0));

    case SYMBOL_REF:
      t = SYMBOL_REF_DECL (x);
      if (t)
        return fold_convert (type, build_fold_addr_expr (t));
      /* else fall through.  */

    default:
      t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);

      /* If TYPE is a POINTER_TYPE, we might need to convert X from
         address mode to pointer mode.  */
      if (POINTER_TYPE_P (type))
        x = convert_memory_address_addr_space
              (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));

      /* Note that we do *not* use SET_DECL_RTL here, because we do not
         want set_decl_rtl to go adjusting REG_ATTRS for this temporary.  */
      t->decl_with_rtl.rtl = x;

      return t;
    }
}
/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */

rtx
expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
{
  rtx tem = 0;

  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
    tem = simplify_binary_operation (AND, mode, op0, op1);
  if (tem == 0)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0,
                        OPTAB_LIB_WIDEN);

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}
/* Helper function for emit_store_flag.  */

static rtx
emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
             enum machine_mode mode, enum machine_mode compare_mode,
             int unsignedp, rtx x, rtx y, int normalizep,
             enum machine_mode target_mode)
{
  rtx op0, last, comparison, subtarget, pattern;
  enum machine_mode result_mode = insn_data[(int) icode].operand[0].mode;

  last = get_last_insn ();
  x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
  y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
  comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
  if (!x || !y
      || !insn_data[icode].operand[2].predicate
          (x, insn_data[icode].operand[2].mode)
      || !insn_data[icode].operand[3].predicate
          (y, insn_data[icode].operand[3].mode)
      || !insn_data[icode].operand[1].predicate (comparison, VOIDmode))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  if (target_mode == VOIDmode)
    target_mode = result_mode;
  if (!target)
    target = gen_reg_rtx (target_mode);

  if (optimize
      || !(insn_data[(int) icode].operand[0].predicate (target, result_mode)))
    subtarget = gen_reg_rtx (result_mode);
  else
    subtarget = target;

  pattern = GEN_FCN (icode) (subtarget, comparison, x, y);
  if (!pattern)
    return NULL_RTX;
  emit_insn (pattern);

  /* If we are converting to a wider mode, first convert to
     TARGET_MODE, then normalize.  This produces better combining
     opportunities on machines that have a SIGN_EXTRACT when we are
     testing a single bit.  This mostly benefits the 68k.

     If STORE_FLAG_VALUE does not have the sign bit set when
     interpreted in MODE, we can do this conversion as unsigned, which
     is usually more efficient.  */
  if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
    {
      convert_move (target, subtarget,
                    (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT)
                    && 0 == (STORE_FLAG_VALUE
                             & ((HOST_WIDE_INT) 1
                                << (GET_MODE_BITSIZE (result_mode) - 1))));
      op0 = target;
      result_mode = target_mode;
    }
  else
    op0 = subtarget;

  /* If we want to keep subexpressions around, don't reuse our last
     target.  */
  if (optimize)
    subtarget = 0;

  /* Now normalize to the proper value in MODE.  Sometimes we don't
     have to do anything.  */
  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
    ;
  /* STORE_FLAG_VALUE might be the most negative number, so write
     the comparison this way to avoid a compiler-time warning.  */
  else if (- normalizep == STORE_FLAG_VALUE)
    op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);

  /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
     it hard to use a value of just the sign bit due to ANSI integer
     constant typing rules.  */
  else if (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
           && (STORE_FLAG_VALUE
               & ((HOST_WIDE_INT) 1
                  << (GET_MODE_BITSIZE (result_mode) - 1))))
    op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
                        size_int (GET_MODE_BITSIZE (result_mode) - 1),
                        subtarget, normalizep == 1);
  else
    {
      gcc_assert (STORE_FLAG_VALUE & 1);

      op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
      if (normalizep == -1)
        op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
    }

  /* If we were converting to a smaller mode, do the conversion now.  */
  if (target_mode != result_mode)
    {
      convert_move (target, op0, 0);
      return target;
    }
  else
    return op0;
}
/* A subroutine of emit_store_flag only including "tricks" that do not
   need a recursive call.  These are kept separate to avoid infinite
   loops.  */

static rtx
emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
                   enum machine_mode mode, int unsignedp, int normalizep,
                   enum machine_mode target_mode)
{
  rtx subtarget;
  enum insn_code icode;
  enum machine_mode compare_mode;
  enum mode_class mclass;
  enum rtx_code scode;
  rtx tem;

  if (unsignedp)
    code = unsigned_condition (code);
  scode = swap_condition (code);

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }

  /* If we are comparing a double-word integer with zero or -1, we can
     convert the comparison into one involving a single word.  */
  if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
      && GET_MODE_CLASS (mode) == MODE_INT
      && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
    {
      if ((code == EQ || code == NE)
          && (op1 == const0_rtx || op1 == constm1_rtx))
        {
          rtx op00, op01;

          /* Do a logical OR or AND of the two words and compare the
             result.  */
          op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
          op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
          tem = expand_binop (word_mode,
                              op1 == const0_rtx ? ior_optab : and_optab,
                              op00, op01, NULL_RTX, unsignedp,
                              OPTAB_DIRECT);

          if (tem != 0)
            tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
                                   unsignedp, normalizep);
        }
      else if ((code == LT || code == GE) && op1 == const0_rtx)
        {
          rtx op0h;

          /* If testing the sign bit, can just test on high word.  */
          op0h = simplify_gen_subreg (word_mode, op0, mode,
                                      subreg_highpart_offset (word_mode,
                                                              mode));
          tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
                                 unsignedp, normalizep);
        }
      else
        tem = NULL_RTX;

      if (tem)
        {
          if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
            return tem;
          if (!target)
            target = gen_reg_rtx (target_mode);

          convert_move (target, tem,
                        0 == ((normalizep ? normalizep : STORE_FLAG_VALUE)
                              & ((HOST_WIDE_INT) 1
                                 << (GET_MODE_BITSIZE (word_mode) - 1))));
          return target;
        }
      return 0;
    }

  /* If this is A < 0 or A >= 0, we can do this by taking the ones
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
          || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
                  == ((unsigned HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (mode) - 1))))))
    {
      subtarget = target;

      if (!target)
        target_mode = mode;

      /* If the result is to be wider than OP0, it is best to convert it
         first.  If it is to be narrower, it is *incorrect* to convert it
         first.  */
      else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
        {
          op0 = convert_modes (target_mode, mode, op0, 0);
          mode = target_mode;
        }

      if (target_mode != mode)
        subtarget = 0;

      if (code == GE)
        op0 = expand_unop (mode, one_cmpl_optab, op0,
                           ((STORE_FLAG_VALUE == 1 || normalizep)
                            ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
        /* If we are supposed to produce a 0/1 value, we want to do
           a logical shift from the sign bit to the low-order bit; for
           a -1/0 value, we do an arithmetic shift.  */
        op0 = expand_shift (RSHIFT_EXPR, mode, op0,
                            size_int (GET_MODE_BITSIZE (mode) - 1),
                            subtarget, normalizep != -1);

      if (mode != target_mode)
        op0 = convert_modes (target_mode, mode, op0, 0);

      return op0;
    }

  mclass = GET_MODE_CLASS (mode);
  for (compare_mode = mode; compare_mode != VOIDmode;
       compare_mode = GET_MODE_WIDER_MODE (compare_mode))
    {
      enum machine_mode optab_mode
        = mclass == MODE_CC ? CCmode : compare_mode;
      icode = optab_handler (cstore_optab, optab_mode);
      if (icode != CODE_FOR_nothing)
        {
          do_pending_stack_adjust ();
          tem = emit_cstore (target, icode, code, mode, compare_mode,
                             unsignedp, op0, op1, normalizep, target_mode);
          if (tem)
            return tem;

          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              tem = emit_cstore (target, icode, scode, mode, compare_mode,
                                 unsignedp, op1, op0, normalizep, target_mode);
              if (tem)
                return tem;
            }
          break;
        }
    }

  return 0;
}
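
/* A sketch (under "#if 0", per this file's convention) of the double-word
   reductions performed above, for a value held in two 32-bit words; the
   helper names and fixed word size are assumptions of the example.  */
#if 0
#include <stdint.h>

/* (EQ x 0) on a double word becomes a single-word test of LO | HI;
   (EQ x -1) would instead AND the words and compare against -1.  */
static int
dword_eq0_sketch (uint32_t lo, uint32_t hi)
{
  return (lo | hi) == 0;
}

/* LT/GE against zero only depend on the sign bit, so only the high
   word needs to be tested.  */
static int
dword_lt0_sketch (uint32_t lo, int32_t hi)
{
  (void) lo;
  return hi < 0;
}
#endif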
/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  Normalize is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */

rtx
emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
                 enum machine_mode mode, int unsignedp, int normalizep)
{
  enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
  enum rtx_code rcode;
  rtx subtarget;
  rtx tem, last, trueval;

  tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp,
                           normalizep, target_mode);
  if (tem)
    return tem;

  /* If we reached here, we can't do this with a scc insn, however there
     are some comparisons that can be done in other ways.  Don't do any
     of these cases if branches are very cheap.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
    return 0;

  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
        normalizep = STORE_FLAG_VALUE;

      else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
               && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
                   == (unsigned HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (mode) - 1)))
        ;
      else
        return 0;
    }

  last = get_last_insn ();

  /* If optimizing, use different pseudo registers for each insn, instead
     of reusing the same pseudo.  This leads to better CSE, but slows
     down the compiler, since there are more pseudos.  */
  subtarget = (!optimize
               && (target_mode == mode)) ? target : NULL_RTX;
  trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);

  /* For floating-point comparisons, try the reverse comparison or try
     changing the "orderedness" of the comparison.  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      enum rtx_code first_code;
      bool and_them;

      rcode = reverse_condition_maybe_unordered (code);
      if (can_compare_p (rcode, mode, ccp_store_flag)
          && (code == ORDERED || code == UNORDERED
              || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
              || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
        {
          int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
                          || (STORE_FLAG_VALUE == -1 && normalizep == 1));

          /* For the reverse comparison, use either an addition or a XOR.  */
          if (want_add
              && rtx_cost (GEN_INT (normalizep), PLUS,
                           optimize_insn_for_speed_p ()) == 0)
            {
              tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                       STORE_FLAG_VALUE, target_mode);
              if (tem)
                return expand_binop (target_mode, add_optab, tem,
                                     GEN_INT (normalizep),
                                     target, 0, OPTAB_WIDEN);
            }
          else if (!want_add
                   && rtx_cost (trueval, XOR,
                                optimize_insn_for_speed_p ()) == 0)
            {
              tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                       normalizep, target_mode);
              if (tem)
                return expand_binop (target_mode, xor_optab, tem, trueval,
                                     target, INTVAL (trueval) >= 0,
                                     OPTAB_WIDEN);
            }
        }

      delete_insns_since (last);

      /* Cannot split ORDERED and UNORDERED, only try the above trick.  */
      if (code == ORDERED || code == UNORDERED)
        return 0;

      and_them = split_comparison (code, mode, &first_code, &code);

      /* If there are no NaNs, the first comparison should always fall
         through.  Effectively change the comparison to the other one.  */
      if (!HONOR_NANS (mode))
        {
          gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
          return emit_store_flag_1 (target, code, op0, op1, mode, 0,
                                    normalizep, target_mode);
        }

#ifdef HAVE_conditional_move
      /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
         conditional move.  */
      tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
                               normalizep, target_mode);
      if (tem == 0)
        return 0;

      if (and_them)
        tem = emit_conditional_move (target, code, op0, op1, mode,
                                     tem, const0_rtx, GET_MODE (tem), 0);
      else
        tem = emit_conditional_move (target, code, op0, op1, mode,
                                     trueval, tem, GET_MODE (tem), 0);

      if (tem == 0)
        delete_insns_since (last);
      return tem;
#else
      return 0;
#endif
    }

  /* The remaining tricks only apply to integer comparisons.  */

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return 0;

  /* If this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if ((code == EQ || code == NE) && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
                          OPTAB_WIDEN);

      if (tem == 0)
        tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
                            OPTAB_WIDEN);
      if (tem != 0)
        tem = emit_store_flag (target, code, tem, const0_rtx,
                               mode, unsignedp, normalizep);
      if (tem != 0)
        return tem;

      delete_insns_since (last);
    }

  /* For integer comparisons, try the reverse comparison.  However, for
     small X and if we'd have anyway to extend, implementing "X != 0"
     as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0".  */
  rcode = reverse_condition (code);
  if (can_compare_p (rcode, mode, ccp_store_flag)
      && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
            && code == NE
            && GET_MODE_SIZE (mode) < UNITS_PER_WORD
            && op1 == const0_rtx))
    {
      int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
                      || (STORE_FLAG_VALUE == -1 && normalizep == 1));

      /* Again, for the reverse comparison, use either an addition or
         a XOR.  */
      if (want_add
          && rtx_cost (GEN_INT (normalizep), PLUS,
                       optimize_insn_for_speed_p ()) == 0)
        {
          tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                   STORE_FLAG_VALUE, target_mode);
          if (tem != 0)
            tem = expand_binop (target_mode, add_optab, tem,
                                GEN_INT (normalizep), target, 0, OPTAB_WIDEN);
        }
      else if (!want_add
               && rtx_cost (trueval, XOR,
                            optimize_insn_for_speed_p ()) == 0)
        {
          tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                   normalizep, target_mode);
          if (tem != 0)
            tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
                                INTVAL (trueval) >= 0, OPTAB_WIDEN);
        }

      if (tem != 0)
        return tem;
      delete_insns_since (last);
    }

  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (op1 != const0_rtx
      || (code != EQ && code != NE
          && (BRANCH_COST (optimize_insn_for_speed_p (),
                           false) <= 1 || (code != LE && code != GT))))
    return 0;

  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
                          OPTAB_WIDEN);
      if (tem)
        tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
                            OPTAB_WIDEN);
    }

  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
                          size_int (GET_MODE_BITSIZE (mode) - 1),
                          subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
                          OPTAB_WIDEN);
    }

  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
         that converts the operand into a positive number if it is nonzero
         or zero if it was originally zero.  Then, for EQ, we subtract 1 and
         for NE we negate.  This puts the result in the sign bit.  Then we
         normalize with a shift, if needed.

         Two operations that can do the above actions are ABS and FFS, so try
         them.  If that doesn't work, and MODE is smaller than a full word,
         we can use zero-extension to the wider mode (an unsigned conversion)
         as the operation.  */

      /* Note that ABS doesn't yield a positive number for INT_MIN, but
         that is compensated by the subsequent overflow when subtracting
         one / negating.  */

      if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
        tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
        tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
        {
          tem = convert_modes (word_mode, mode, op0, 1);
          mode = word_mode;
        }

      if (tem != 0)
        {
          if (code == EQ)
            tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
                                0, OPTAB_WIDEN);
          else
            tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
        }

      /* If we couldn't do it that way, for NE we can "or" the two's complement
         of the value with itself.  For EQ, we take the one's complement of
         that "or", which is an extra insn, so we only handle EQ if branches
         are expensive.  */

      if (tem == 0
          && (code == NE
              || BRANCH_COST (optimize_insn_for_speed_p (),
                              false) > 1))
        {
          if (rtx_equal_p (subtarget, op0))
            subtarget = 0;

          tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
          tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
                              OPTAB_WIDEN);

          if (tem && code == EQ)
            tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
        }
    }

  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
                        size_int (GET_MODE_BITSIZE (mode) - 1),
                        subtarget, normalizep == 1);

  if (tem)
    {
      if (!target)
        ;
      else if (GET_MODE (tem) != target_mode)
        {
          convert_move (target, tem, 0);
          tem = target;
        }
      else if (!subtarget)
        {
          emit_move_insn (target, tem);
          tem = target;
        }
    }
  else
    delete_insns_since (last);

  return tem;
}
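
/* Sketches (under "#if 0", per this file's convention) of the branch-free
   comparisons with zero used above; each returns the normalized 0/1
   store-flag value.  int32_t, two's complement wraparound and an
   arithmetic ">>" on signed values are assumptions of the example.  */
#if 0
#include <stdint.h>

static int32_t
lt0_sketch (int32_t a)   /* a < 0: logical shift of the sign bit.  */
{
  return (uint32_t) a >> 31;
}

static int32_t
le0_sketch (int32_t a)   /* a <= 0: sign bit of A | (A - 1).  */
{
  return (uint32_t) (a | (a - 1)) >> 31;
}

static int32_t
gt0_sketch (int32_t a)   /* a > 0: sign bit of (A >> 31) - A.  */
{
  return (uint32_t) ((a >> 31) - a) >> 31;
}

static int32_t
ne0_sketch (int32_t a)   /* a != 0: sign bit of -A | A.  */
{
  return (uint32_t) (-a | a) >> 31;
}
#endif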
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
                       enum machine_mode mode, int unsignedp, int normalizep)
{
  rtx tem, label;
  rtx trueval, falseval;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (!target)
    target = gen_reg_rtx (word_mode);

  /* If this failed, we have to do this with set/compare/jump/set code.
     For foo != 0, if foo is in OP0, just replace it with 1 if nonzero.  */
  trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
  if (code == NE
      && GET_MODE_CLASS (mode) == MODE_INT
      && REG_P (target)
      && op0 == target
      && op1 == const0_rtx)
    {
      label = gen_label_rtx ();
      do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
                               mode, NULL_RTX, NULL_RTX, label, -1);
      emit_move_insn (target, trueval);
      emit_label (label);
      return target;
    }

  if (!REG_P (target)
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  /* Jump in the right direction if the target cannot implement CODE
     but can jump on its reverse condition.  */
  falseval = const0_rtx;
  if (! can_compare_p (code, mode, ccp_jump)
      && (! FLOAT_MODE_P (mode)
          || code == ORDERED || code == UNORDERED
          || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
          || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
    {
      enum rtx_code rcode;
      if (FLOAT_MODE_P (mode))
        rcode = reverse_condition_maybe_unordered (code);
      else
        rcode = reverse_condition (code);

      /* Canonicalize to UNORDERED for the libcall.  */
      if (can_compare_p (rcode, mode, ccp_jump)
          || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
        {
          falseval = trueval;
          trueval = const0_rtx;
          code = rcode;
        }
    }

  emit_move_insn (target, trueval);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
                           NULL_RTX, label, -1);

  emit_move_insn (target, falseval);
  emit_label (label);

  return target;
}
/* Perform possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE.  This is
   now a thin wrapper around do_compare_rtx_and_jump.  */

static void
do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
                 rtx label)
{
  int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
  do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
                           NULL_RTX, NULL_RTX, label, -1);
}