/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "insn-config.h"
#include "langhooks.h"
struct target_expmed default_target_expmed;
struct target_expmed *this_target_expmed = &default_target_expmed;
static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int, bool);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
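/* Illustrative sketch (added commentary, not part of the original source):
   the macro works because clearing the lowest set bit of a power of two
   leaves zero, while any value with two or more set bits keeps at least
   one.  A standalone equivalent with a couple of worked cases:  */
#if 0
static int
example_exact_power_of_2_or_zero_p (unsigned HOST_WIDE_INT x)
{
  /* 8 & 7 == 0, so 8 is accepted; 12 & 11 == 8, so 12 is rejected;
     0 & (0 - 1) == 0, so zero is accepted as well.  */
  return (x & (x - 1)) == 0;
}
#endif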
#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
/* Reduce conditional compilation elsewhere.  */
#define CODE_FOR_insv	CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#define CODE_FOR_extv	CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#define CODE_FOR_extzv	CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
void
init_expmed (void)
{
  struct
  {
    struct rtx_def reg;		rtunion reg_fld[2];
    struct rtx_def plus;	rtunion plus_fld1;
    struct rtx_def neg;
    struct rtx_def mult;	rtunion mult_fld1;
    struct rtx_def sdiv;	rtunion sdiv_fld1;
    struct rtx_def udiv;	rtunion udiv_fld1;
    struct rtx_def zext;
    struct rtx_def sdiv_32;	rtunion sdiv_32_fld1;
    struct rtx_def smod_32;	rtunion smod_32_fld1;
    struct rtx_def wide_mult;	rtunion wide_mult_fld1;
    struct rtx_def wide_lshr;	rtunion wide_lshr_fld1;
    struct rtx_def wide_trunc;
    struct rtx_def shift;	rtunion shift_fld1;
    struct rtx_def shift_mult;	rtunion shift_mult_fld1;
    struct rtx_def shift_add;	rtunion shift_add_fld1;
    struct rtx_def shift_sub0;	rtunion shift_sub0_fld1;
    struct rtx_def shift_sub1;	rtunion shift_sub1_fld1;
  } all;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
  int m, n;
  enum machine_mode mode, wider_mode;
  int speed;
  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      cint[m] = GEN_INT (m);
    }
  memset (&all, 0, sizeof all);

  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.sdiv, DIV);
  XEXP (&all.sdiv, 0) = &all.reg;
  XEXP (&all.sdiv, 1) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.sdiv_32, DIV);
  XEXP (&all.sdiv_32, 0) = &all.reg;
  XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);

  PUT_CODE (&all.smod_32, MOD);
  XEXP (&all.smod_32, 0) = &all.reg;
  XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub0, MINUS);
  XEXP (&all.shift_sub0, 0) = &all.shift_mult;
  XEXP (&all.shift_sub0, 1) = &all.reg;

  PUT_CODE (&all.shift_sub1, MINUS);
  XEXP (&all.shift_sub1, 0) = &all.reg;
  XEXP (&all.shift_sub1, 1) = &all.shift_mult;

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      zero_cost[speed] = rtx_cost (const0_rtx, SET, speed);

      for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
           mode != VOIDmode;
           mode = GET_MODE_WIDER_MODE (mode))
        {
          PUT_MODE (&all.reg, mode);
          PUT_MODE (&all.plus, mode);
          PUT_MODE (&all.neg, mode);
          PUT_MODE (&all.mult, mode);
          PUT_MODE (&all.sdiv, mode);
          PUT_MODE (&all.udiv, mode);
          PUT_MODE (&all.sdiv_32, mode);
          PUT_MODE (&all.smod_32, mode);
          PUT_MODE (&all.wide_trunc, mode);
          PUT_MODE (&all.shift, mode);
          PUT_MODE (&all.shift_mult, mode);
          PUT_MODE (&all.shift_add, mode);
          PUT_MODE (&all.shift_sub0, mode);
          PUT_MODE (&all.shift_sub1, mode);

          add_cost[speed][mode] = rtx_cost (&all.plus, SET, speed);
          neg_cost[speed][mode] = rtx_cost (&all.neg, SET, speed);
          mul_cost[speed][mode] = rtx_cost (&all.mult, SET, speed);
          sdiv_cost[speed][mode] = rtx_cost (&all.sdiv, SET, speed);
          udiv_cost[speed][mode] = rtx_cost (&all.udiv, SET, speed);

          sdiv_pow2_cheap[speed][mode] = (rtx_cost (&all.sdiv_32, SET, speed)
                                          <= 2 * add_cost[speed][mode]);
          smod_pow2_cheap[speed][mode] = (rtx_cost (&all.smod_32, SET, speed)
                                          <= 4 * add_cost[speed][mode]);

          wider_mode = GET_MODE_WIDER_MODE (mode);
          if (wider_mode != VOIDmode)
            {
              PUT_MODE (&all.zext, wider_mode);
              PUT_MODE (&all.wide_mult, wider_mode);
              PUT_MODE (&all.wide_lshr, wider_mode);
              XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));

              mul_widen_cost[speed][wider_mode]
                = rtx_cost (&all.wide_mult, SET, speed);
              mul_highpart_cost[speed][mode]
                = rtx_cost (&all.wide_trunc, SET, speed);
            }

          shift_cost[speed][mode][0] = 0;
          shiftadd_cost[speed][mode][0] = shiftsub0_cost[speed][mode][0]
            = shiftsub1_cost[speed][mode][0] = add_cost[speed][mode];

          n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
          for (m = 1; m < n; m++)
            {
              XEXP (&all.shift, 1) = cint[m];
              XEXP (&all.shift_mult, 1) = pow2[m];

              shift_cost[speed][mode][m] = rtx_cost (&all.shift, SET, speed);
              shiftadd_cost[speed][mode][m] = rtx_cost (&all.shift_add, SET, speed);
              shiftsub0_cost[speed][mode][m] = rtx_cost (&all.shift_sub0, SET, speed);
              shiftsub1_cost[speed][mode][m] = rtx_cost (&all.shift_sub1, SET, speed);
            }
        }
    }

  if (alg_hash_used_p)
    memset (alg_hash, 0, sizeof (alg_hash));
  else
    alg_hash_used_p = true;

  default_rtl_profile ();
}
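/* Illustrative sketch (added commentary, not part of the original source):
   the point of the skeletal RTL built above is that later passes can
   compare cached costs instead of re-querying the target.  A hypothetical
   helper showing the typical lookup, using the tables filled in here:  */
#if 0
static bool
example_shift_cheaper_than_mult (bool speed, enum machine_mode mode, int m)
{
  /* True when an ASHIFT by M in MODE is no more expensive than a full
     multiplication in the same mode, for the given size/speed setting.  */
  return shift_cost[speed][mode][m] <= mul_cost[speed][mode];
}
#endif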
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data_d *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      gcc_unreachable ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
/* Return true if X, of mode MODE, matches the predicate for operand
   OPNO of instruction ICODE.  Allow volatile memories, regardless of
   the ambient volatile_ok setting.  */

static bool
check_predicate_volatile_ok (enum insn_code icode, int opno,
                             rtx x, enum machine_mode mode)
{
  bool save_volatile_ok, result;

  save_volatile_ok = volatile_ok;
  volatile_ok = true;
  result = insn_data[(int) icode].operand[opno].predicate (x, mode);
  volatile_ok = save_volatile_ok;

  return result;
}
/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
                   rtx value, bool fallback_p)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  int byte_offset;
  rtx orig_value;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
      int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));

      byte_offset = 0;

      /* Paradoxical subregs need special handling on big endian machines.  */
      if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
        {
          int difference = inner_mode_size - outer_mode_size;

          if (WORDS_BIG_ENDIAN)
            byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            byte_offset += difference % UNITS_PER_WORD;
        }
      else
        byte_offset = SUBREG_BYTE (op0);

      bitnum += byte_offset * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return true;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) optab_handler (vec_set_optab, outermode);
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode1, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
                  && (*insn_data[icode].operand[1].predicate) (src, mode1)
                  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return true;
        }
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  offset = bitnum / unit;
  bitpos = bitnum % unit;
  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (!MEM_P (op0)
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
              || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (MEM_P (op0))
        op0 = adjust_address (op0, fieldmode, offset);
      else if (GET_MODE (op0) != fieldmode)
        op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
                                   byte_offset);
      emit_move_insn (op0, value);
      return true;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_address (op0, imode, 0);
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);
          }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (!MEM_P (op0)
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
    {
      int icode = optab_handler (movstrict_optab, fieldmode);
      rtx insn;
      rtx start = get_last_insn ();
      rtx arg0 = op0;

      /* Get appropriate low part of the value being stored.  */
      if (CONST_INT_P (value) || REG_P (value))
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          arg0 = SUBREG_REG (op0);
        }

      insn = (GEN_FCN (icode)
              (gen_rtx_SUBREG (fieldmode, arg0,
                               (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                               + (offset * UNITS_PER_WORD)),
               value));
      if (insn)
        {
          emit_insn (insn);
          return true;
        }
      delete_insns_since (start);
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force
         will result in an abort.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx value_word = operand_subword_force (value, wordnum, fieldmode);

          if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
                                            bitsize - i * BITS_PER_WORD),
                                  bitnum + bit_offset, word_mode,
                                  value_word, fallback_p))
            {
              delete_insns_since (last);
              return false;
            }
        }
      return true;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (!MEM_P (op0))
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            {
              /* Since this is a destination (lvalue), we can't copy
                 it to a pseudo.  We can remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may
                 have been added above.  */
              gcc_assert (GET_CODE (op0) == SUBREG
                          && (GET_MODE_SIZE (GET_MODE (op0))
                              == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
              op0 = SUBREG_REG (op0);
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */
  orig_value = value;
  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && GET_MODE_BITSIZE (op_mode) >= bitsize
      && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
      && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
                                                        VOIDmode)
      && check_predicate_volatile_ok (CODE_FOR_insv, 0, op0, VOIDmode))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      bool copy_back = false;

      /* Add OFFSET into OP0's address.  */
      if (MEM_P (xop0))
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in OP_MODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
        xop0 = gen_lowpart_SUBREG (op_mode, xop0);

      /* If the destination is a paradoxical subreg such that we need a
         truncate to the inner mode, perform the insertion on a temporary and
         truncate the result to the original destination.  Note that we can't
         just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
         X) 0)) is (reg:N X).  */
      if (GET_CODE (xop0) == SUBREG
          && REG_P (SUBREG_REG (xop0))
          && (!TRULY_NOOP_TRUNCATION
              (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (xop0))),
               GET_MODE_BITSIZE (op_mode))))
        {
          rtx tem = gen_reg_rtx (op_mode);
          emit_move_insn (tem, xop0);
          xop0 = tem;
          copy_back = true;
        }

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
        xbitpos += GET_MODE_BITSIZE (op_mode) - unit;

      unit = GET_MODE_BITSIZE (op_mode);

      /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != op_mode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (op_mode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (op_mode, value1);
            }
          else if (CONST_INT_P (value))
            value1 = gen_int_mode (INTVAL (value), op_mode);
          else
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            gcc_assert (CONSTANT_P (value));
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, op_mode)))
        value1 = force_reg (op_mode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        {
          emit_insn (pat);

          if (copy_back)
            convert_move (op0, xop0, true);
          return true;
        }
      delete_insns_since (last);
    }

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (HAVE_insv && MEM_P (op0))
    {
      enum machine_mode bestmode;

      /* Get the mode to use for inserting into this field.  If OP0 is
         BLKmode, get the smallest mode consistent with the alignment. If
         OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
         mode. Otherwise, use the smallest mode containing the field.  */

      if (GET_MODE (op0) == BLKmode
          || (op_mode != MAX_MACHINE_MODE
              && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
        bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
                                  (op_mode == MAX_MACHINE_MODE
                                   ? VOIDmode : op_mode),
                                  MEM_VOLATILE_P (op0));
      else
        bestmode = GET_MODE (op0);

      if (bestmode != VOIDmode
          && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
          && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
               && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
        {
          rtx last, tempreg, xop0;
          unsigned HOST_WIDE_INT xoffset, xbitpos;

          last = get_last_insn ();

          /* Adjust address to point to the containing unit of
             that mode.  Compute the offset as a multiple of this unit,
             counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          xbitpos = bitnum % unit;
          xop0 = adjust_address (op0, bestmode, xoffset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (xop0);
          if (store_bit_field_1 (tempreg, bitsize, xbitpos,
                                 fieldmode, orig_value, false))
            {
              emit_move_insn (xop0, tempreg);
              return true;
            }
          delete_insns_since (last);
        }
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return true;
}
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.  */

void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
                 rtx value)
{
  if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true))
    gcc_unreachable ();
}
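/* Illustrative sketch (added commentary, not part of the original source):
   a typical call from expansion code stores an 8-bit field starting at
   bit 8 of a SImode pseudo; the operand choices here are made up.  */
#if 0
static void
example_store_byte_field (void)
{
  rtx reg = gen_reg_rtx (SImode);
  /* Store the constant 0x5a into bits [8,15] of REG, viewed in QImode.  */
  store_bit_field (reg, 8, 8, QImode, GEN_INT (0x5a));
}
#endif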
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    {
      gcc_assert (!offset);
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos, value);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;

      if (MEM_VOLATILE_P (op0)
          && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
          && flag_strict_volatile_bitfields > 0)
        mode = GET_MODE (op0);
      else
        mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                              MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (CONST_INT_P (value))
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        value = convert_to_mode (mode, value, 1);

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, temp,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    {
      op0 = copy_rtx (op0);
      emit_move_insn (op0, temp);
    }
}
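/* Illustrative sketch (added commentary, not part of the original source):
   the RTL emitted above implements the usual read/mask/or/write sequence.
   In plain integer terms, for a field of BITSIZE bits at BITPOS (counted
   from the lsb, with BITSIZE smaller than the word width) it is
   equivalent to:  */
#if 0
static unsigned HOST_WIDE_INT
example_store_field (unsigned HOST_WIDE_INT word,
                     unsigned HOST_WIDE_INT value,
                     int bitpos, int bitsize)
{
  unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << bitsize) - 1;
  /* Clear the destination bits, then OR in the masked, shifted value.  */
  return (word & ~(mask << bitpos)) | ((value & mask) << bitpos);
}
#endif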
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && !CONST_INT_P (value))
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (!MEM_P (value) || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1, false);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1, false);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (REG_P (op0))
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
                             thispos, part);
      bitsdone += thissize;
    }
}
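/* Illustrative sketch (added commentary, not part of the original source):
   the loop above chops the field at every UNIT boundary.  Worked example
   assuming BITS_PER_WORD == 32, little-endian bit numbering and no
   endianness corrections: storing an 8-bit value at bit position 28
   becomes two partial stores.  */
#if 0
static void
example_split_store (unsigned int *words, unsigned int value)
{
  /* Low 4 bits of the value go into bits [28,31] of the first word.  */
  words[0] = (words[0] & ~(0xfu << 28)) | ((value & 0xf) << 28);
  /* High 4 bits go into bits [0,3] of the next word.  */
  words[1] = (words[1] & ~0xfu) | ((value >> 4) & 0xf);
}
#endif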
/* A subroutine of extract_bit_field_1 that converts return value X
   to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
   to extract_bit_field.  */

static rtx
convert_extracted_bit_field (rtx x, enum machine_mode mode,
                             enum machine_mode tmode, bool unsignedp)
{
  if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
    return x;

  /* If the x mode is not a scalar integral, first convert to the
     integer mode of that size and then access it as a floating-point
     value via a SUBREG.  */
  if (!SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode smode;

      smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
      x = convert_to_mode (smode, x, unsignedp);
      x = force_reg (smode, x);
      return gen_lowpart (tmode, x);
    }

  return convert_to_mode (tmode, x, unsignedp);
}
/* A subroutine of extract_bit_field, with the same arguments.
   If FALLBACK_P is true, fall back to extract_fixed_bit_field
   if we can find no other means of implementing the operation.
   If FALLBACK_P is false, return NULL instead.  */

static rtx
extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                     unsigned HOST_WIDE_INT bitnum,
                     int unsignedp, bool packedp, rtx target,
                     enum machine_mode mode, enum machine_mode tmode,
                     bool fallback_p)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  enum machine_mode int_mode;
  enum machine_mode ext_mode;
  enum machine_mode mode1;
  enum insn_code icode;
  int byte_offset;

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* See if we can get a better vector mode before extracting.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && GET_MODE_INNER (GET_MODE (op0)) != tmode)
    {
      enum machine_mode new_mode;
      int nunits = GET_MODE_NUNITS (GET_MODE (op0));

      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        new_mode = MIN_MODE_VECTOR_FLOAT;
      else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
        new_mode = MIN_MODE_VECTOR_FRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
        new_mode = MIN_MODE_VECTOR_UFRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
        new_mode = MIN_MODE_VECTOR_ACCUM;
      else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
        new_mode = MIN_MODE_VECTOR_UACCUM;
      else
        new_mode = MIN_MODE_VECTOR_INT;

      for (; new_mode != VOIDmode; new_mode = GET_MODE_WIDER_MODE (new_mode))
        if (GET_MODE_NUNITS (new_mode) == nunits
            && GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
            && targetm.vector_mode_supported_p (new_mode))
          break;
      if (new_mode != VOIDmode)
        op0 = gen_lowpart (new_mode, op0);
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
          == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) optab_handler (vec_extract_optab, outermode);
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
        dest = target;

      if (dest == NULL)
        dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
        dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode1, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
                  && (*insn_data[icode].operand[1].predicate) (src, mode1)
                  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));

      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          if (mode0 != mode)
            return gen_lowpart (tmode, dest);
          return dest;
        }
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          {
            op0 = gen_lowpart (imode, op0);

            /* If we got a SUBREG, force it into a register since we
               aren't going to be able to do another SUBREG on it.  */
            if (GET_CODE (op0) == SUBREG)
              op0 = force_reg (imode, op0);
          }
        else if (REG_P (op0))
          {
            rtx reg, subreg;
            imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
                                            MODE_INT);
            reg = gen_reg_rtx (imode);
            subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
            emit_move_insn (subreg, op0);
            op0 = reg;
            bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
          }
        else
          {
            rtx mem = assign_stack_temp (GET_MODE (op0),
                                         GET_MODE_SIZE (GET_MODE (op0)), 0);
            emit_move_insn (mem, op0);
            op0 = adjust_address (mem, BLKmode, 0);
          }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  bitpos = bitnum % unit;
  offset = bitnum / unit;
  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
           ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
           : mode);

  /* If the bitfield is volatile, we need to make sure the access
     remains on a type-aligned boundary.  */
  if (GET_CODE (op0) == MEM
      && MEM_VOLATILE_P (op0)
      && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
      && flag_strict_volatile_bitfields > 0)
    goto no_subreg_mode_swap;

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
        && bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
           /* ??? The big endian test here is wrong.  This is correct
              if the value is in a register, and if mode_for_size is not
              the same mode as op0.  This causes us to get unnecessarily
              inefficient code from the Thumb port when -mbig-endian.  */
           && (BYTES_BIG_ENDIAN
               ? bitpos + bitsize == BITS_PER_WORD
               : bitpos == 0)))
      && ((!MEM_P (op0)
           && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode1),
                                     GET_MODE_BITSIZE (GET_MODE (op0)))
           && GET_MODE_SIZE (mode1) != 0
           && byte_offset % GET_MODE_SIZE (mode1) == 0)
          || (MEM_P (op0)
              && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
                  || (offset * BITS_PER_UNIT % bitsize == 0
                      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (MEM_P (op0))
        op0 = adjust_address (op0, mode1, offset);
      else if (mode1 != GET_MODE (op0))
        {
          rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
                                         byte_offset);
          if (sub == NULL)
            goto no_subreg_mode_swap;
          op0 = sub;
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || !REG_P (target))
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_clobber (target);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, false, target_part, mode,
                                 word_mode);

          gcc_assert (target_part);

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_cst (NULL_TREE,
                                            GET_MODE_BITSIZE (mode) - bitsize),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_cst (NULL_TREE,
                                          GET_MODE_BITSIZE (mode) - bitsize),
                           NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  /* Should probably push op0 out to memory and then do a load.  */
  gcc_assert (int_mode != BLKmode);

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */
  if (!MEM_P (op0))
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (REG_P (op0))
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }

  /* Now OFFSET is nonzero only for memory operands.  */
  ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
  icode = unsignedp ? CODE_FOR_extzv : CODE_FOR_extv;
  if (ext_mode != MAX_MACHINE_MODE
      && GET_MODE_BITSIZE (ext_mode) >= bitsize
      /* If op0 is a register, we need it in EXT_MODE to make it
         acceptable to the format of ext(z)v.  */
      && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
      && !((REG_P (op0) || GET_CODE (op0) == SUBREG)
           && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode)))
      && check_predicate_volatile_ok (icode, 1, op0, GET_MODE (op0)))
    {
      unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
      rtx bitsize_rtx, bitpos_rtx;
      rtx last = get_last_insn ();
      rtx xop0 = op0;
      rtx xtarget = target;
      rtx xspec_target = target;
      rtx xspec_target_subreg = 0;
      rtx pat;

      /* If op0 is a register, we need it in EXT_MODE to make it
         acceptable to the format of ext(z)v.  */
      if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
        xop0 = gen_lowpart_SUBREG (ext_mode, xop0);
      if (MEM_P (xop0))
        /* Get ref to first byte containing part of the field.  */
        xop0 = adjust_address (xop0, byte_mode, xoffset);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */
      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* Now convert from counting within UNIT to counting in EXT_MODE.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
        xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;

      unit = GET_MODE_BITSIZE (ext_mode);

      if (xtarget == 0)
        xtarget = xspec_target = gen_reg_rtx (tmode);

      if (GET_MODE (xtarget) != ext_mode)
        {
          /* Don't use LHS paradoxical subreg if explicit truncation is needed
             between the mode of the extraction (word_mode) and the target
             mode.  Instead, create a temporary and use convert_move to set
             the target.  */
          if (REG_P (xtarget)
              && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (xtarget)),
                                        GET_MODE_BITSIZE (ext_mode)))
            {
              xtarget = gen_lowpart (ext_mode, xtarget);
              if (GET_MODE_SIZE (ext_mode)
                  > GET_MODE_SIZE (GET_MODE (xspec_target)))
                xspec_target_subreg = xtarget;
            }
          else
            xtarget = gen_reg_rtx (ext_mode);
        }

      /* If this machine's ext(z)v insists on a register target,
         make sure we have one.  */
      if (!insn_data[(int) icode].operand[0].predicate (xtarget, ext_mode))
        xtarget = gen_reg_rtx (ext_mode);

      bitsize_rtx = GEN_INT (bitsize);
      bitpos_rtx = GEN_INT (xbitpos);

      pat = (unsignedp
             ? gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx)
             : gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx));
      if (pat)
        {
          emit_insn (pat);
          if (xtarget == xspec_target)
            return xtarget;
          if (xtarget == xspec_target_subreg)
            return xspec_target;
          return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
        }
      delete_insns_since (last);
    }

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
    {
      enum machine_mode bestmode;

      /* Get the mode to use for inserting into this field.  If
         OP0 is BLKmode, get the smallest mode consistent with the
         alignment.  If OP0 is a non-BLKmode object that is no
         wider than EXT_MODE, use its mode.  Otherwise, use the
         smallest mode containing the field.  */

      if (GET_MODE (op0) == BLKmode
          || (ext_mode != MAX_MACHINE_MODE
              && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
        bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
                                  (ext_mode == MAX_MACHINE_MODE
                                   ? VOIDmode : ext_mode),
                                  MEM_VOLATILE_P (op0));
      else
        bestmode = GET_MODE (op0);

      if (bestmode != VOIDmode
          && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
               && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
        {
          unsigned HOST_WIDE_INT xoffset, xbitpos;

          /* Compute the offset as a multiple of this unit,
             counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          xbitpos = bitnum % unit;

          /* Make sure the register is big enough for the whole field.  */
          if (xoffset * BITS_PER_UNIT + unit
              >= offset * BITS_PER_UNIT + bitsize)
            {
              rtx last, result, xop0;

              last = get_last_insn ();

              /* Fetch it to a register in that size.  */
              xop0 = adjust_address (op0, bestmode, xoffset);
              xop0 = force_reg (bestmode, xop0);
              result = extract_bit_field_1 (xop0, bitsize, xbitpos,
                                            unsignedp, packedp, target,
                                            mode, tmode, false);
              if (result)
                return result;

              delete_insns_since (last);
            }
        }
    }

  if (!fallback_p)
    return NULL;

  target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                    bitpos, target, unsignedp, packedp);
  return convert_extracted_bit_field (target, mode, tmode, unsignedp);
}
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   PACKEDP is nonzero if the field has the packed attribute.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, bool packedp,
                   rtx target, enum machine_mode mode, enum machine_mode tmode)
{
  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp, packedp,
                              target, mode, tmode, true);
}
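/* Illustrative sketch (added commentary, not part of the original source):
   a typical call pulls a signed 8-bit field out of bits [8,15] of a
   SImode pseudo; the operand choices here are made up.  */
#if 0
static rtx
example_extract_byte_field (rtx reg)
{
  /* unsignedp = 0 requests sign extension; packedp = false; no target.  */
  return extract_bit_field (reg, 8, 8, 0, false, NULL_RTX, QImode, QImode);
}
#endif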
1688 /* Extract a bit field using shifts and boolean operations
1689 Returns an rtx to represent the value.
1690 OP0 addresses a register (word) or memory (byte).
1691 BITPOS says which bit within the word or byte the bit field starts in.
1692 OFFSET says how many bytes farther the bit field starts;
1693 it is 0 if OP0 is a register.
1694 BITSIZE says how many bits long the bit field is.
1695 (If OP0 is a register, it may be narrower than a full word,
1696 but BITPOS still counts within a full word,
1697 which is significant on bigendian machines.)
1699 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1700 PACKEDP is true if the field has the packed attribute.
1702 If TARGET is nonzero, attempts to store the value there
1703 and return TARGET, but this is not guaranteed.
1704 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1707 extract_fixed_bit_field (enum machine_mode tmode
, rtx op0
,
1708 unsigned HOST_WIDE_INT offset
,
1709 unsigned HOST_WIDE_INT bitsize
,
1710 unsigned HOST_WIDE_INT bitpos
, rtx target
,
1711 int unsignedp
, bool packedp
)
1713 unsigned int total_bits
= BITS_PER_WORD
;
1714 enum machine_mode mode
;
1716 if (GET_CODE (op0
) == SUBREG
|| REG_P (op0
))
1718 /* Special treatment for a bit field split across two registers. */
1719 if (bitsize
+ bitpos
> BITS_PER_WORD
)
1720 return extract_split_bit_field (op0
, bitsize
, bitpos
, unsignedp
);
1724 /* Get the proper mode to use for this field. We want a mode that
1725 includes the entire field. If such a mode would be larger than
1726 a word, we won't be doing the extraction the normal way. */
1728 if (MEM_VOLATILE_P (op0
)
1729 && flag_strict_volatile_bitfields
> 0)
1731 if (GET_MODE_BITSIZE (GET_MODE (op0
)) > 0)
1732 mode
= GET_MODE (op0
);
1733 else if (target
&& GET_MODE_BITSIZE (GET_MODE (target
)) > 0)
1734 mode
= GET_MODE (target
);
1739 mode
= get_best_mode (bitsize
, bitpos
+ offset
* BITS_PER_UNIT
,
1740 MEM_ALIGN (op0
), word_mode
, MEM_VOLATILE_P (op0
));
1742 if (mode
== VOIDmode
)
1743 /* The only way this should occur is if the field spans word
1745 return extract_split_bit_field (op0
, bitsize
,
1746 bitpos
+ offset
* BITS_PER_UNIT
,
1749 total_bits
= GET_MODE_BITSIZE (mode
);
1751 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1752 be in the range 0 to total_bits-1, and put any excess bytes in
1754 if (bitpos
>= total_bits
)
1756 offset
+= (bitpos
/ total_bits
) * (total_bits
/ BITS_PER_UNIT
);
1757 bitpos
-= ((bitpos
/ total_bits
) * (total_bits
/ BITS_PER_UNIT
)
1761 /* If we're accessing a volatile MEM, we can't do the next
1762 alignment step if it results in a multi-word access where we
1763 otherwise wouldn't have one. So, check for that case
1766 && MEM_VOLATILE_P (op0
)
1767 && flag_strict_volatile_bitfields
> 0
1768 && bitpos
+ bitsize
<= total_bits
1769 && bitpos
+ bitsize
+ (offset
% (total_bits
/ BITS_PER_UNIT
)) * BITS_PER_UNIT
> total_bits
)
1771 if (STRICT_ALIGNMENT
)
1773 static bool informed_about_misalignment
= false;
1778 if (bitsize
== total_bits
)
1779 warned
= warning_at (input_location
, OPT_fstrict_volatile_bitfields
,
1780 "multiple accesses to volatile structure member"
1781 " because of packed attribute");
1783 warned
= warning_at (input_location
, OPT_fstrict_volatile_bitfields
,
1784 "multiple accesses to volatile structure bitfield"
1785 " because of packed attribute");
1787 return extract_split_bit_field (op0
, bitsize
,
1788 bitpos
+ offset
* BITS_PER_UNIT
,
1792 if (bitsize
== total_bits
)
1793 warned
= warning_at (input_location
, OPT_fstrict_volatile_bitfields
,
1794 "mis-aligned access used for structure member");
1796 warned
= warning_at (input_location
, OPT_fstrict_volatile_bitfields
,
1797 "mis-aligned access used for structure bitfield");
1799 if (! informed_about_misalignment
&& warned
)
1801 informed_about_misalignment
= true;
1802 inform (input_location
,
1803 "When a volatile object spans multiple type-sized locations,"
1804 " the compiler must choose between using a single mis-aligned access to"
1805 " preserve the volatility, or using multiple aligned accesses to avoid"
1806 " runtime faults. This code may fail at runtime if the hardware does"
1807 " not allow this access.");
1814 /* Get ref to an aligned byte, halfword, or word containing the field.
1815 Adjust BITPOS to be position within a word,
1816 and OFFSET to be the offset of that word.
1817 Then alter OP0 to refer to that word. */
1818 bitpos
+= (offset
% (total_bits
/ BITS_PER_UNIT
)) * BITS_PER_UNIT
;
1819 offset
-= (offset
% (total_bits
/ BITS_PER_UNIT
));
1822 op0
= adjust_address (op0
, mode
, offset
);
1825 mode
= GET_MODE (op0
);
1827 if (BYTES_BIG_ENDIAN
)
1828 /* BITPOS is the distance between our msb and that of OP0.
1829 Convert it to the distance from the lsb. */
1830 bitpos
= total_bits
- bitsize
- bitpos
;
1832 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1833 We have reduced the big-endian case to the little-endian case. */
1839 /* If the field does not already start at the lsb,
1840 shift it so it does. */
1841 tree amount
= build_int_cst (NULL_TREE
, bitpos
);
1842 /* Maybe propagate the target for the shift. */
1843 /* But not if we will return it--could confuse integrate.c. */
1844 rtx subtarget
= (target
!= 0 && REG_P (target
) ? target
: 0);
1845 if (tmode
!= mode
) subtarget
= 0;
1846 op0
= expand_shift (RSHIFT_EXPR
, mode
, op0
, amount
, subtarget
, 1);
1848 /* Convert the value to the desired mode. */
1850 op0
= convert_to_mode (tmode
, op0
, 1);
1852 /* Unless the msb of the field used to be the msb when we shifted,
1853 mask out the upper bits. */
1855 if (GET_MODE_BITSIZE (mode
) != bitpos
+ bitsize
)
1856 return expand_binop (GET_MODE (op0
), and_optab
, op0
,
1857 mask_rtx (GET_MODE (op0
), 0, bitsize
, 0),
1858 target
, 1, OPTAB_LIB_WIDEN
);
1862 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1863 then arithmetic-shift its lsb to the lsb of the word. */
1864 op0
= force_reg (mode
, op0
);
1868 /* Find the narrowest integer mode that contains the field. */
1870 for (mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
); mode
!= VOIDmode
;
1871 mode
= GET_MODE_WIDER_MODE (mode
))
1872 if (GET_MODE_BITSIZE (mode
) >= bitsize
+ bitpos
)
1874 op0
= convert_to_mode (mode
, op0
, 0);
1878 if (GET_MODE_BITSIZE (mode
) != (bitsize
+ bitpos
))
1881 = build_int_cst (NULL_TREE
,
1882 GET_MODE_BITSIZE (mode
) - (bitsize
+ bitpos
));
1883 /* Maybe propagate the target for the shift. */
1884 rtx subtarget
= (target
!= 0 && REG_P (target
) ? target
: 0);
1885 op0
= expand_shift (LSHIFT_EXPR
, mode
, op0
, amount
, subtarget
, 1);
1888 return expand_shift (RSHIFT_EXPR
, mode
, op0
,
1889 build_int_cst (NULL_TREE
,
1890 GET_MODE_BITSIZE (mode
) - bitsize
),
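/* Illustrative sketch, not part of GCC: the two extraction patterns used
   above, shown on a plain 32-bit word.  Assumes 'unsigned int'/'int' are
   32 bits wide, two's complement, with arithmetic right shift of negative
   ints, and BITPOS counted from the least significant bit.  The helper
   names are hypothetical.  */

static unsigned int
extract_unsigned_field_sketch (unsigned int word, int bitpos, int bitsize)
{
  /* Shift the field down to bit 0, then AND with a BITSIZE-wide mask,
     as done above with RSHIFT_EXPR followed by and_optab/mask_rtx.  */
  unsigned int mask = bitsize < 32 ? (1u << bitsize) - 1u : ~0u;
  return (word >> bitpos) & mask;
}

static int
extract_signed_field_sketch (unsigned int word, int bitpos, int bitsize)
{
  /* Shift the field's msb up to the word's msb, then arithmetic-shift
     its lsb back down to bit 0, sign-extending the field.  */
  int tmp = (int) (word << (32 - bitsize - bitpos));
  return tmp >> (32 - bitsize);
}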
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
{
  double_int mask;

  mask = double_int_mask (bitsize);
  mask = double_int_lshift (mask, bitpos, HOST_BITS_PER_DOUBLE_INT, false);

  if (complement)
    mask = double_int_not (mask);

  return immed_double_int_const (mask, mode);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
{
  double_int val;

  val = double_int_zext (uhwi_to_double_int (INTVAL (value)), bitsize);
  val = double_int_lshift (val, bitpos, HOST_BITS_PER_DOUBLE_INT, false);

  return immed_double_int_const (val, mode);
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if we should zero-extend the contents; else sign-extend.  */
1936 extract_split_bit_field (rtx op0
, unsigned HOST_WIDE_INT bitsize
,
1937 unsigned HOST_WIDE_INT bitpos
, int unsignedp
)
1940 unsigned int bitsdone
= 0;
1941 rtx result
= NULL_RTX
;
1944 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1946 if (REG_P (op0
) || GET_CODE (op0
) == SUBREG
)
1947 unit
= BITS_PER_WORD
;
1949 unit
= MIN (MEM_ALIGN (op0
), BITS_PER_WORD
);
1951 while (bitsdone
< bitsize
)
1953 unsigned HOST_WIDE_INT thissize
;
1955 unsigned HOST_WIDE_INT thispos
;
1956 unsigned HOST_WIDE_INT offset
;
1958 offset
= (bitpos
+ bitsdone
) / unit
;
1959 thispos
= (bitpos
+ bitsdone
) % unit
;
1961 /* THISSIZE must not overrun a word boundary. Otherwise,
1962 extract_fixed_bit_field will call us again, and we will mutually
1964 thissize
= MIN (bitsize
- bitsdone
, BITS_PER_WORD
);
1965 thissize
= MIN (thissize
, unit
- thispos
);
1967 /* If OP0 is a register, then handle OFFSET here.
1969 When handling multiword bitfields, extract_bit_field may pass
1970 down a word_mode SUBREG of a larger REG for a bitfield that actually
1971 crosses a word boundary. Thus, for a SUBREG, we must find
1972 the current word starting from the base register. */
1973 if (GET_CODE (op0
) == SUBREG
)
1975 int word_offset
= (SUBREG_BYTE (op0
) / UNITS_PER_WORD
) + offset
;
1976 word
= operand_subword_force (SUBREG_REG (op0
), word_offset
,
1977 GET_MODE (SUBREG_REG (op0
)));
1980 else if (REG_P (op0
))
1982 word
= operand_subword_force (op0
, offset
, GET_MODE (op0
));
1988 /* Extract the parts in bit-counting order,
1989 whose meaning is determined by BYTES_PER_UNIT.
1990 OFFSET is in UNITs, and UNIT is in bits.
1991 extract_fixed_bit_field wants offset in bytes. */
1992 part
= extract_fixed_bit_field (word_mode
, word
,
1993 offset
* unit
/ BITS_PER_UNIT
,
1994 thissize
, thispos
, 0, 1, false);
1995 bitsdone
+= thissize
;
1997 /* Shift this part into place for the result. */
1998 if (BYTES_BIG_ENDIAN
)
2000 if (bitsize
!= bitsdone
)
2001 part
= expand_shift (LSHIFT_EXPR
, word_mode
, part
,
2002 build_int_cst (NULL_TREE
, bitsize
- bitsdone
),
2007 if (bitsdone
!= thissize
)
2008 part
= expand_shift (LSHIFT_EXPR
, word_mode
, part
,
2009 build_int_cst (NULL_TREE
,
2010 bitsdone
- thissize
), 0, 1);
2016 /* Combine the parts with bitwise or. This works
2017 because we extracted each part as an unsigned bit field. */
2018 result
= expand_binop (word_mode
, ior_optab
, part
, result
, NULL_RTX
, 1,
2024 /* Unsigned bit field: we are done. */
2027 /* Signed bit field: sign-extend with two arithmetic shifts. */
2028 result
= expand_shift (LSHIFT_EXPR
, word_mode
, result
,
2029 build_int_cst (NULL_TREE
, BITS_PER_WORD
- bitsize
),
2031 return expand_shift (RSHIFT_EXPR
, word_mode
, result
,
2032 build_int_cst (NULL_TREE
, BITS_PER_WORD
- bitsize
),
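/* Illustrative sketch, not part of GCC: extracting an unsigned field that
   spans two 32-bit words, in the spirit of extract_split_bit_field above:
   take the low piece from WORD0, the remaining bits from WORD1, and combine
   the parts with IOR.  Assumes little-endian bit numbering, 0 < BITPOS < 32
   and 0 < BITSIZE <= 32; the helper name is hypothetical.  */

static unsigned int
extract_split_field_sketch (unsigned int word0, unsigned int word1,
			    int bitpos, int bitsize)
{
  int bits_in_word0 = 32 - bitpos;	/* bits available in WORD0 */
  unsigned int low = word0 >> bitpos;	/* piece from WORD0, zero-filled above */
  unsigned int mask = bitsize < 32 ? (1u << bitsize) - 1u : ~0u;
  int rest;

  if (bitsize <= bits_in_word0)
    return low & mask;

  rest = bitsize - bits_in_word0;	/* bits still needed from WORD1 */
  return low | ((word1 & ((1u << rest) - 1u)) << bits_in_word0);
}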
/* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
   the bit pattern.  SRC_MODE is the mode of SRC; if this is smaller than
   MODE, fill the upper bits with zeros.  Fail if the layout of either
   mode is unknown (as for CC modes) or if the extraction would involve
   unprofitable mode punning.  Return the value on success, otherwise
   return null.

   This is different from gen_lowpart* in these respects:

     - the returned value must always be considered an rvalue

     - when MODE is wider than SRC_MODE, the extraction involves
       a zero extension

     - when MODE is smaller than SRC_MODE, the extraction involves
       a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).

   In other words, this routine performs a computation, whereas the
   gen_lowpart* routines are conceptually lvalue or rvalue subreg
   operations.  */

rtx
extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
{
  enum machine_mode int_mode, src_int_mode;

  if (mode == src_mode)
    return src;

  if (CONSTANT_P (src))
    {
      /* simplify_gen_subreg can't be used here, as if simplify_subreg
	 fails, it will happily create (subreg (symbol_ref)) or similar
	 invalid SUBREGs.  */
      unsigned int byte = subreg_lowpart_offset (mode, src_mode);
      rtx ret = simplify_subreg (mode, src, src_mode, byte);
      if (ret)
	return ret;

      if (GET_MODE (src) == VOIDmode
	  || !validate_subreg (mode, src_mode, src, byte))
	return NULL_RTX;

      src = force_reg (GET_MODE (src), src);
      return gen_rtx_SUBREG (mode, src, byte);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
    return NULL_RTX;

  if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
      && MODES_TIEABLE_P (mode, src_mode))
    {
      rtx x = gen_lowpart_common (mode, src);
      if (x)
	return x;
    }

  src_int_mode = int_mode_for_mode (src_mode);
  int_mode = int_mode_for_mode (mode);
  if (src_int_mode == BLKmode || int_mode == BLKmode)
    return NULL_RTX;

  if (!MODES_TIEABLE_P (src_int_mode, src_mode))
    return NULL_RTX;
  if (!MODES_TIEABLE_P (int_mode, mode))
    return NULL_RTX;

  src = gen_lowpart (src_int_mode, src);
  src = convert_modes (int_mode, src_int_mode, src, true);
  src = gen_lowpart (mode, src);
  return src;
}
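/* Illustrative sketch, not part of GCC: the kind of bit-pattern-preserving
   rvalue read that extract_low_bits performs at the RTL level, shown here
   as reading the bits of a 'float' as a 32-bit integer.  Assumes 'float'
   and 'unsigned int' are both 32 bits wide; the helper name is
   hypothetical.  Using memcpy keeps this a value computation rather than
   an aliasing reinterpretation of an lvalue.  */

static unsigned int
float_bits_sketch (float f)
{
  unsigned int bits;
  __builtin_memcpy (&bits, &f, sizeof bits);
  return bits;
}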
/* Add INC into TARGET.  */

void
expand_inc (rtx target, rtx inc)
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
			    target, inc,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (rtx target, rtx dec)
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
			    target, dec,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
2134 /* Output a shift instruction for expression code CODE,
2135 with SHIFTED being the rtx for the value to shift,
2136 and AMOUNT the tree for the amount to shift by.
2137 Store the result in the rtx TARGET, if that is convenient.
2138 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2139 Return the rtx for where the value is. */
2142 expand_shift (enum tree_code code
, enum machine_mode mode
, rtx shifted
,
2143 tree amount
, rtx target
, int unsignedp
)
2146 int left
= (code
== LSHIFT_EXPR
|| code
== LROTATE_EXPR
);
2147 int rotate
= (code
== LROTATE_EXPR
|| code
== RROTATE_EXPR
);
2148 optab lshift_optab
= ashl_optab
;
2149 optab rshift_arith_optab
= ashr_optab
;
2150 optab rshift_uns_optab
= lshr_optab
;
2151 optab lrotate_optab
= rotl_optab
;
2152 optab rrotate_optab
= rotr_optab
;
2153 enum machine_mode op1_mode
;
2155 bool speed
= optimize_insn_for_speed_p ();
2157 op1
= expand_normal (amount
);
2158 op1_mode
= GET_MODE (op1
);
2160 /* Determine whether the shift/rotate amount is a vector, or scalar. If the
2161 shift amount is a vector, use the vector/vector shift patterns. */
2162 if (VECTOR_MODE_P (mode
) && VECTOR_MODE_P (op1_mode
))
2164 lshift_optab
= vashl_optab
;
2165 rshift_arith_optab
= vashr_optab
;
2166 rshift_uns_optab
= vlshr_optab
;
2167 lrotate_optab
= vrotl_optab
;
2168 rrotate_optab
= vrotr_optab
;
2171 /* Previously detected shift-counts computed by NEGATE_EXPR
2172 and shifted in the other direction; but that does not work
2175 if (SHIFT_COUNT_TRUNCATED
)
2177 if (CONST_INT_P (op1
)
2178 && ((unsigned HOST_WIDE_INT
) INTVAL (op1
) >=
2179 (unsigned HOST_WIDE_INT
) GET_MODE_BITSIZE (mode
)))
2180 op1
= GEN_INT ((unsigned HOST_WIDE_INT
) INTVAL (op1
)
2181 % GET_MODE_BITSIZE (mode
));
2182 else if (GET_CODE (op1
) == SUBREG
2183 && subreg_lowpart_p (op1
)
2184 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (op1
))))
2185 op1
= SUBREG_REG (op1
);
2188 if (op1
== const0_rtx
)
2191 /* Check whether it's cheaper to implement a left shift by a constant
2192 bit count by a sequence of additions. */
2193 if (code
== LSHIFT_EXPR
2194 && CONST_INT_P (op1
)
2196 && INTVAL (op1
) < GET_MODE_BITSIZE (mode
)
2197 && INTVAL (op1
) < MAX_BITS_PER_WORD
2198 && shift_cost
[speed
][mode
][INTVAL (op1
)] > INTVAL (op1
) * add_cost
[speed
][mode
]
2199 && shift_cost
[speed
][mode
][INTVAL (op1
)] != MAX_COST
)
2202 for (i
= 0; i
< INTVAL (op1
); i
++)
2204 temp
= force_reg (mode
, shifted
);
2205 shifted
= expand_binop (mode
, add_optab
, temp
, temp
, NULL_RTX
,
2206 unsignedp
, OPTAB_LIB_WIDEN
);
2211 for (attempt
= 0; temp
== 0 && attempt
< 3; attempt
++)
2213 enum optab_methods methods
;
2216 methods
= OPTAB_DIRECT
;
2217 else if (attempt
== 1)
2218 methods
= OPTAB_WIDEN
;
2220 methods
= OPTAB_LIB_WIDEN
;
2224 /* Widening does not work for rotation. */
2225 if (methods
== OPTAB_WIDEN
)
2227 else if (methods
== OPTAB_LIB_WIDEN
)
2229 /* If we have been unable to open-code this by a rotation,
2230 do it as the IOR of two shifts. I.e., to rotate A
2231 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2232 where C is the bitsize of A.
2234 It is theoretically possible that the target machine might
2235 not be able to perform either shift and hence we would
2236 be making two libcalls rather than just the one for the
2237 shift (similarly if IOR could not be done). We will allow
2238 this extremely unlikely lossage to avoid complicating the
2241 rtx subtarget
= target
== shifted
? 0 : target
;
2242 tree new_amount
, other_amount
;
2244 tree type
= TREE_TYPE (amount
);
2245 if (GET_MODE (op1
) != TYPE_MODE (type
)
2246 && GET_MODE (op1
) != VOIDmode
)
2247 op1
= convert_to_mode (TYPE_MODE (type
), op1
, 1);
2248 new_amount
= make_tree (type
, op1
);
2250 = fold_build2 (MINUS_EXPR
, type
,
2251 build_int_cst (type
, GET_MODE_BITSIZE (mode
)),
2254 shifted
= force_reg (mode
, shifted
);
2256 temp
= expand_shift (left
? LSHIFT_EXPR
: RSHIFT_EXPR
,
2257 mode
, shifted
, new_amount
, 0, 1);
2258 temp1
= expand_shift (left
? RSHIFT_EXPR
: LSHIFT_EXPR
,
2259 mode
, shifted
, other_amount
, subtarget
, 1);
2260 return expand_binop (mode
, ior_optab
, temp
, temp1
, target
,
2261 unsignedp
, methods
);
2264 temp
= expand_binop (mode
,
2265 left
? lrotate_optab
: rrotate_optab
,
2266 shifted
, op1
, target
, unsignedp
, methods
);
2269 temp
= expand_binop (mode
,
2270 left
? lshift_optab
: rshift_uns_optab
,
2271 shifted
, op1
, target
, unsignedp
, methods
);
2273 /* Do arithmetic shifts.
2274 Also, if we are going to widen the operand, we can just as well
2275 use an arithmetic right-shift instead of a logical one. */
2276 if (temp
== 0 && ! rotate
2277 && (! unsignedp
|| (! left
&& methods
== OPTAB_WIDEN
)))
2279 enum optab_methods methods1
= methods
;
2281 /* If trying to widen a log shift to an arithmetic shift,
2282 don't accept an arithmetic shift of the same size. */
2284 methods1
= OPTAB_MUST_WIDEN
;
2286 /* Arithmetic shift */
2288 temp
= expand_binop (mode
,
2289 left
? lshift_optab
: rshift_arith_optab
,
2290 shifted
, op1
, target
, unsignedp
, methods1
);
2293 /* We used to try extzv here for logical right shifts, but that was
2294 only useful for one machine, the VAX, and caused poor code
2295 generation there for lshrdi3, so the code was deleted and a
2296 define_expand for lshrsi3 was added to vax.md. */
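/* Illustrative sketch, not part of GCC: the rotate-as-two-shifts fallback
   expand_shift uses above when no rotate pattern or library call fits:
   A rotated left by N is (A << N) | ((unsigned) A >> (C - N)), where C is
   the bitsize of A.  Assumes a 32-bit 'unsigned int'; the helper name is
   hypothetical.  N is reduced mod 32 and N == 0 is special-cased so the
   right-shift count stays in range.  */

static unsigned int
rotate_left_sketch (unsigned int a, unsigned int n)
{
  n &= 31;
  if (n == 0)
    return a;
  return (a << n) | (a >> (32 - n));
}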
/* Indicates the type of fixup needed after a constant multiplication.
   BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
   the result should be negated, and ADD_VARIANT means that the
   multiplicand should be added to the result.  */
enum mult_variant {basic_variant, negate_variant, add_variant};

static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
			const struct mult_cost *, enum machine_mode mode);
static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
				 struct algorithm *, enum mult_variant *, int);
static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
			      const struct algorithm *, enum mult_variant);
static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
						 int, rtx *, int *, int *);
static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
static rtx extract_high_half (enum machine_mode, rtx);
static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
				       int, int);

/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than cost_limit.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.
   MODE is the machine mode of the multiplication.  */
2329 synth_mult (struct algorithm
*alg_out
, unsigned HOST_WIDE_INT t
,
2330 const struct mult_cost
*cost_limit
, enum machine_mode mode
)
2333 struct algorithm
*alg_in
, *best_alg
;
2334 struct mult_cost best_cost
;
2335 struct mult_cost new_limit
;
2336 int op_cost
, op_latency
;
2337 unsigned HOST_WIDE_INT orig_t
= t
;
2338 unsigned HOST_WIDE_INT q
;
2339 int maxm
= MIN (BITS_PER_WORD
, GET_MODE_BITSIZE (mode
));
2341 bool cache_hit
= false;
2342 enum alg_code cache_alg
= alg_zero
;
2343 bool speed
= optimize_insn_for_speed_p ();
2345 /* Indicate that no algorithm is yet found. If no algorithm
2346 is found, this value will be returned and indicate failure. */
2347 alg_out
->cost
.cost
= cost_limit
->cost
+ 1;
2348 alg_out
->cost
.latency
= cost_limit
->latency
+ 1;
2350 if (cost_limit
->cost
< 0
2351 || (cost_limit
->cost
== 0 && cost_limit
->latency
<= 0))
2354 /* Restrict the bits of "t" to the multiplication's mode. */
2355 t
&= GET_MODE_MASK (mode
);
2357 /* t == 1 can be done at zero cost. */
2361 alg_out
->cost
.cost
= 0;
2362 alg_out
->cost
.latency
= 0;
2363 alg_out
->op
[0] = alg_m
;
2367 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2371 if (MULT_COST_LESS (cost_limit
, zero_cost
[speed
]))
2376 alg_out
->cost
.cost
= zero_cost
[speed
];
2377 alg_out
->cost
.latency
= zero_cost
[speed
];
2378 alg_out
->op
[0] = alg_zero
;
2383 /* We'll be needing a couple extra algorithm structures now. */
2385 alg_in
= XALLOCA (struct algorithm
);
2386 best_alg
= XALLOCA (struct algorithm
);
2387 best_cost
= *cost_limit
;
2389 /* Compute the hash index. */
2390 hash_index
= (t
^ (unsigned int) mode
^ (speed
* 256)) % NUM_ALG_HASH_ENTRIES
;
2392 /* See if we already know what to do for T. */
2393 if (alg_hash
[hash_index
].t
== t
2394 && alg_hash
[hash_index
].mode
== mode
2395 && alg_hash
[hash_index
].mode
== mode
2396 && alg_hash
[hash_index
].speed
== speed
2397 && alg_hash
[hash_index
].alg
!= alg_unknown
)
2399 cache_alg
= alg_hash
[hash_index
].alg
;
2401 if (cache_alg
== alg_impossible
)
2403 /* The cache tells us that it's impossible to synthesize
2404 multiplication by T within alg_hash[hash_index].cost. */
2405 if (!CHEAPER_MULT_COST (&alg_hash
[hash_index
].cost
, cost_limit
))
2406 /* COST_LIMIT is at least as restrictive as the one
2407 recorded in the hash table, in which case we have no
2408 hope of synthesizing a multiplication. Just
2412 /* If we get here, COST_LIMIT is less restrictive than the
2413 one recorded in the hash table, so we may be able to
2414 synthesize a multiplication. Proceed as if we didn't
2415 have the cache entry. */
2419 if (CHEAPER_MULT_COST (cost_limit
, &alg_hash
[hash_index
].cost
))
2420 /* The cached algorithm shows that this multiplication
2421 requires more cost than COST_LIMIT. Just return. This
2422 way, we don't clobber this cache entry with
2423 alg_impossible but retain useful information. */
2435 goto do_alg_addsub_t_m2
;
2437 case alg_add_factor
:
2438 case alg_sub_factor
:
2439 goto do_alg_addsub_factor
;
2442 goto do_alg_add_t2_m
;
2445 goto do_alg_sub_t2_m
;
2453 /* If we have a group of zero bits at the low-order part of T, try
2454 multiplying by the remaining bits and then doing a shift. */
2459 m
= floor_log2 (t
& -t
); /* m = number of low zero bits */
2463 /* The function expand_shift will choose between a shift and
2464 a sequence of additions, so the observed cost is given as
2465 MIN (m * add_cost[speed][mode], shift_cost[speed][mode][m]). */
2466 op_cost
= m
* add_cost
[speed
][mode
];
2467 if (shift_cost
[speed
][mode
][m
] < op_cost
)
2468 op_cost
= shift_cost
[speed
][mode
][m
];
2469 new_limit
.cost
= best_cost
.cost
- op_cost
;
2470 new_limit
.latency
= best_cost
.latency
- op_cost
;
2471 synth_mult (alg_in
, q
, &new_limit
, mode
);
2473 alg_in
->cost
.cost
+= op_cost
;
2474 alg_in
->cost
.latency
+= op_cost
;
2475 if (CHEAPER_MULT_COST (&alg_in
->cost
, &best_cost
))
2477 struct algorithm
*x
;
2478 best_cost
= alg_in
->cost
;
2479 x
= alg_in
, alg_in
= best_alg
, best_alg
= x
;
2480 best_alg
->log
[best_alg
->ops
] = m
;
2481 best_alg
->op
[best_alg
->ops
] = alg_shift
;
2484 /* See if treating ORIG_T as a signed number yields a better
2485 sequence. Try this sequence only for a negative ORIG_T
2486 as it would be useless for a non-negative ORIG_T. */
2487 if ((HOST_WIDE_INT
) orig_t
< 0)
2489 /* Shift ORIG_T as follows because a right shift of a
2490 negative-valued signed type is implementation
2492 q
= ~(~orig_t
>> m
);
2493 /* The function expand_shift will choose between a shift
2494 and a sequence of additions, so the observed cost is
2495 given as MIN (m * add_cost[speed][mode],
2496 shift_cost[speed][mode][m]). */
2497 op_cost
= m
* add_cost
[speed
][mode
];
2498 if (shift_cost
[speed
][mode
][m
] < op_cost
)
2499 op_cost
= shift_cost
[speed
][mode
][m
];
2500 new_limit
.cost
= best_cost
.cost
- op_cost
;
2501 new_limit
.latency
= best_cost
.latency
- op_cost
;
2502 synth_mult (alg_in
, q
, &new_limit
, mode
);
2504 alg_in
->cost
.cost
+= op_cost
;
2505 alg_in
->cost
.latency
+= op_cost
;
2506 if (CHEAPER_MULT_COST (&alg_in
->cost
, &best_cost
))
2508 struct algorithm
*x
;
2509 best_cost
= alg_in
->cost
;
2510 x
= alg_in
, alg_in
= best_alg
, best_alg
= x
;
2511 best_alg
->log
[best_alg
->ops
] = m
;
2512 best_alg
->op
[best_alg
->ops
] = alg_shift
;
2520 /* If we have an odd number, add or subtract one. */
2523 unsigned HOST_WIDE_INT w
;
2526 for (w
= 1; (w
& t
) != 0; w
<<= 1)
2528 /* If T was -1, then W will be zero after the loop. This is another
2529 case where T ends with ...111. Handling this with (T + 1) and
2530 subtracting 1 produces slightly better code and results in algorithm
2531 selection much faster than treating it like the ...0111 case
2535 /* Reject the case where t is 3.
2536 Thus we prefer addition in that case. */
2539 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2541 op_cost
= add_cost
[speed
][mode
];
2542 new_limit
.cost
= best_cost
.cost
- op_cost
;
2543 new_limit
.latency
= best_cost
.latency
- op_cost
;
2544 synth_mult (alg_in
, t
+ 1, &new_limit
, mode
);
2546 alg_in
->cost
.cost
+= op_cost
;
2547 alg_in
->cost
.latency
+= op_cost
;
2548 if (CHEAPER_MULT_COST (&alg_in
->cost
, &best_cost
))
2550 struct algorithm
*x
;
2551 best_cost
= alg_in
->cost
;
2552 x
= alg_in
, alg_in
= best_alg
, best_alg
= x
;
2553 best_alg
->log
[best_alg
->ops
] = 0;
2554 best_alg
->op
[best_alg
->ops
] = alg_sub_t_m2
;
2559 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2561 op_cost
= add_cost
[speed
][mode
];
2562 new_limit
.cost
= best_cost
.cost
- op_cost
;
2563 new_limit
.latency
= best_cost
.latency
- op_cost
;
2564 synth_mult (alg_in
, t
- 1, &new_limit
, mode
);
2566 alg_in
->cost
.cost
+= op_cost
;
2567 alg_in
->cost
.latency
+= op_cost
;
2568 if (CHEAPER_MULT_COST (&alg_in
->cost
, &best_cost
))
2570 struct algorithm
*x
;
2571 best_cost
= alg_in
->cost
;
2572 x
= alg_in
, alg_in
= best_alg
, best_alg
= x
;
2573 best_alg
->log
[best_alg
->ops
] = 0;
2574 best_alg
->op
[best_alg
->ops
] = alg_add_t_m2
;
2578 /* We may be able to calculate a * -7, a * -15, a * -31, etc
2579 quickly with a - a * n for some appropriate constant n. */
2580 m
= exact_log2 (-orig_t
+ 1);
2581 if (m
>= 0 && m
< maxm
)
2583 op_cost
= shiftsub1_cost
[speed
][mode
][m
];
2584 new_limit
.cost
= best_cost
.cost
- op_cost
;
2585 new_limit
.latency
= best_cost
.latency
- op_cost
;
2586 synth_mult (alg_in
, (unsigned HOST_WIDE_INT
) (-orig_t
+ 1) >> m
, &new_limit
, mode
);
2588 alg_in
->cost
.cost
+= op_cost
;
2589 alg_in
->cost
.latency
+= op_cost
;
2590 if (CHEAPER_MULT_COST (&alg_in
->cost
, &best_cost
))
2592 struct algorithm
*x
;
2593 best_cost
= alg_in
->cost
;
2594 x
= alg_in
, alg_in
= best_alg
, best_alg
= x
;
2595 best_alg
->log
[best_alg
->ops
] = m
;
2596 best_alg
->op
[best_alg
->ops
] = alg_sub_t_m2
;
2604 /* Look for factors of t of the form
2605 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2606 If we find such a factor, we can multiply by t using an algorithm that
2607 multiplies by q, shift the result by m and add/subtract it to itself.
2609 We search for large factors first and loop down, even if large factors
2610 are less probable than small; if we find a large factor we will find a
2611 good sequence quickly, and therefore be able to prune (by decreasing
2612 COST_LIMIT) the search. */
2614 do_alg_addsub_factor
:
2615 for (m
= floor_log2 (t
- 1); m
>= 2; m
--)
2617 unsigned HOST_WIDE_INT d
;
2619 d
= ((unsigned HOST_WIDE_INT
) 1 << m
) + 1;
2620 if (t
% d
== 0 && t
> d
&& m
< maxm
2621 && (!cache_hit
|| cache_alg
== alg_add_factor
))
2623 /* If the target has a cheap shift-and-add instruction use
2624 that in preference to a shift insn followed by an add insn.
2625 Assume that the shift-and-add is "atomic" with a latency
2626 equal to its cost, otherwise assume that on superscalar
2627 hardware the shift may be executed concurrently with the
2628 earlier steps in the algorithm. */
2629 op_cost
= add_cost
[speed
][mode
] + shift_cost
[speed
][mode
][m
];
2630 if (shiftadd_cost
[speed
][mode
][m
] < op_cost
)
2632 op_cost
= shiftadd_cost
[speed
][mode
][m
];
2633 op_latency
= op_cost
;
2636 op_latency
= add_cost
[speed
][mode
];
2638 new_limit
.cost
= best_cost
.cost
- op_cost
;
2639 new_limit
.latency
= best_cost
.latency
- op_latency
;
2640 synth_mult (alg_in
, t
/ d
, &new_limit
, mode
);
2642 alg_in
->cost
.cost
+= op_cost
;
2643 alg_in
->cost
.latency
+= op_latency
;
2644 if (alg_in
->cost
.latency
< op_cost
)
2645 alg_in
->cost
.latency
= op_cost
;
2646 if (CHEAPER_MULT_COST (&alg_in
->cost
, &best_cost
))
2648 struct algorithm
*x
;
2649 best_cost
= alg_in
->cost
;
2650 x
= alg_in
, alg_in
= best_alg
, best_alg
= x
;
2651 best_alg
->log
[best_alg
->ops
] = m
;
2652 best_alg
->op
[best_alg
->ops
] = alg_add_factor
;
2654 /* Other factors will have been taken care of in the recursion. */
2658 d
= ((unsigned HOST_WIDE_INT
) 1 << m
) - 1;
2659 if (t
% d
== 0 && t
> d
&& m
< maxm
2660 && (!cache_hit
|| cache_alg
== alg_sub_factor
))
2662 /* If the target has a cheap shift-and-subtract insn use
2663 that in preference to a shift insn followed by a sub insn.
2664 Assume that the shift-and-sub is "atomic" with a latency
2665 equal to its cost, otherwise assume that on superscalar
2666 hardware the shift may be executed concurrently with the
2667 earlier steps in the algorithm. */
2668 op_cost
= add_cost
[speed
][mode
] + shift_cost
[speed
][mode
][m
];
2669 if (shiftsub0_cost
[speed
][mode
][m
] < op_cost
)
2671 op_cost
= shiftsub0_cost
[speed
][mode
][m
];
2672 op_latency
= op_cost
;
2675 op_latency
= add_cost
[speed
][mode
];
2677 new_limit
.cost
= best_cost
.cost
- op_cost
;
2678 new_limit
.latency
= best_cost
.latency
- op_latency
;
2679 synth_mult (alg_in
, t
/ d
, &new_limit
, mode
);
2681 alg_in
->cost
.cost
+= op_cost
;
2682 alg_in
->cost
.latency
+= op_latency
;
2683 if (alg_in
->cost
.latency
< op_cost
)
2684 alg_in
->cost
.latency
= op_cost
;
2685 if (CHEAPER_MULT_COST (&alg_in
->cost
, &best_cost
))
2687 struct algorithm
*x
;
2688 best_cost
= alg_in
->cost
;
2689 x
= alg_in
, alg_in
= best_alg
, best_alg
= x
;
2690 best_alg
->log
[best_alg
->ops
] = m
;
2691 best_alg
->op
[best_alg
->ops
] = alg_sub_factor
;
2699 /* Try shift-and-add (load effective address) instructions,
2700 i.e. do a*3, a*5, a*9. */
2707 if (m
>= 0 && m
< maxm
)
2709 op_cost
= shiftadd_cost
[speed
][mode
][m
];
2710 new_limit
.cost
= best_cost
.cost
- op_cost
;
2711 new_limit
.latency
= best_cost
.latency
- op_cost
;
2712 synth_mult (alg_in
, (t
- 1) >> m
, &new_limit
, mode
);
2714 alg_in
->cost
.cost
+= op_cost
;
2715 alg_in
->cost
.latency
+= op_cost
;
2716 if (CHEAPER_MULT_COST (&alg_in
->cost
, &best_cost
))
2718 struct algorithm
*x
;
2719 best_cost
= alg_in
->cost
;
2720 x
= alg_in
, alg_in
= best_alg
, best_alg
= x
;
2721 best_alg
->log
[best_alg
->ops
] = m
;
2722 best_alg
->op
[best_alg
->ops
] = alg_add_t2_m
;
2732 if (m
>= 0 && m
< maxm
)
2734 op_cost
= shiftsub0_cost
[speed
][mode
][m
];
2735 new_limit
.cost
= best_cost
.cost
- op_cost
;
2736 new_limit
.latency
= best_cost
.latency
- op_cost
;
2737 synth_mult (alg_in
, (t
+ 1) >> m
, &new_limit
, mode
);
2739 alg_in
->cost
.cost
+= op_cost
;
2740 alg_in
->cost
.latency
+= op_cost
;
2741 if (CHEAPER_MULT_COST (&alg_in
->cost
, &best_cost
))
2743 struct algorithm
*x
;
2744 best_cost
= alg_in
->cost
;
2745 x
= alg_in
, alg_in
= best_alg
, best_alg
= x
;
2746 best_alg
->log
[best_alg
->ops
] = m
;
2747 best_alg
->op
[best_alg
->ops
] = alg_sub_t2_m
;
2755 /* If best_cost has not decreased, we have not found any algorithm. */
2756 if (!CHEAPER_MULT_COST (&best_cost
, cost_limit
))
2758 /* We failed to find an algorithm. Record alg_impossible for
2759 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2760 we are asked to find an algorithm for T within the same or
2761 lower COST_LIMIT, we can immediately return to the
2763 alg_hash
[hash_index
].t
= t
;
2764 alg_hash
[hash_index
].mode
= mode
;
2765 alg_hash
[hash_index
].speed
= speed
;
2766 alg_hash
[hash_index
].alg
= alg_impossible
;
2767 alg_hash
[hash_index
].cost
= *cost_limit
;
2771 /* Cache the result. */
2774 alg_hash
[hash_index
].t
= t
;
2775 alg_hash
[hash_index
].mode
= mode
;
2776 alg_hash
[hash_index
].speed
= speed
;
2777 alg_hash
[hash_index
].alg
= best_alg
->op
[best_alg
->ops
];
2778 alg_hash
[hash_index
].cost
.cost
= best_cost
.cost
;
2779 alg_hash
[hash_index
].cost
.latency
= best_cost
.latency
;
2782 /* If we are getting a too long sequence for `struct algorithm'
2783 to record, make this search fail. */
2784 if (best_alg
->ops
== MAX_BITS_PER_WORD
)
2787 /* Copy the algorithm from temporary space to the space at alg_out.
2788 We avoid using structure assignment because the majority of
2789 best_alg is normally undefined, and this is a critical function. */
2790 alg_out
->ops
= best_alg
->ops
+ 1;
2791 alg_out
->cost
= best_cost
;
2792 memcpy (alg_out
->op
, best_alg
->op
,
2793 alg_out
->ops
* sizeof *alg_out
->op
);
2794 memcpy (alg_out
->log
, best_alg
->log
,
2795 alg_out
->ops
* sizeof *alg_out
->log
);
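/* Illustrative sketch, not part of GCC: the kind of shift-and-add sequence
   synth_mult searches for, written out by hand for two constants.  Assumes
   a 32-bit 'unsigned int' (overflow wraps, matching the modular arithmetic
   of the expanded RTL); the helper names are hypothetical.  */

static unsigned int
mul_by_10_sketch (unsigned int x)
{
  unsigned int t = (x << 2) + x;	/* x * 5: shift-and-add (alg_add_t2_m) */
  return t << 1;			/* x * 10: final shift (alg_shift) */
}

static unsigned int
mul_by_7_sketch (unsigned int x)
{
  return (x << 3) - x;			/* 7 = 8 - 1 (alg_sub_t_m2) */
}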
2798 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2799 Try three variations:
2801 - a shift/add sequence based on VAL itself
2802 - a shift/add sequence based on -VAL, followed by a negation
2803 - a shift/add sequence based on VAL - 1, followed by an addition.
2805 Return true if the cheapest of these cost less than MULT_COST,
2806 describing the algorithm in *ALG and final fixup in *VARIANT. */
2809 choose_mult_variant (enum machine_mode mode
, HOST_WIDE_INT val
,
2810 struct algorithm
*alg
, enum mult_variant
*variant
,
2813 struct algorithm alg2
;
2814 struct mult_cost limit
;
2816 bool speed
= optimize_insn_for_speed_p ();
2818 /* Fail quickly for impossible bounds. */
2822 /* Ensure that mult_cost provides a reasonable upper bound.
2823 Any constant multiplication can be performed with less
2824 than 2 * bits additions. */
2825 op_cost
= 2 * GET_MODE_BITSIZE (mode
) * add_cost
[speed
][mode
];
2826 if (mult_cost
> op_cost
)
2827 mult_cost
= op_cost
;
2829 *variant
= basic_variant
;
2830 limit
.cost
= mult_cost
;
2831 limit
.latency
= mult_cost
;
2832 synth_mult (alg
, val
, &limit
, mode
);
2834 /* This works only if the inverted value actually fits in an
2836 if (HOST_BITS_PER_INT
>= GET_MODE_BITSIZE (mode
))
2838 op_cost
= neg_cost
[speed
][mode
];
2839 if (MULT_COST_LESS (&alg
->cost
, mult_cost
))
2841 limit
.cost
= alg
->cost
.cost
- op_cost
;
2842 limit
.latency
= alg
->cost
.latency
- op_cost
;
2846 limit
.cost
= mult_cost
- op_cost
;
2847 limit
.latency
= mult_cost
- op_cost
;
2850 synth_mult (&alg2
, -val
, &limit
, mode
);
2851 alg2
.cost
.cost
+= op_cost
;
2852 alg2
.cost
.latency
+= op_cost
;
2853 if (CHEAPER_MULT_COST (&alg2
.cost
, &alg
->cost
))
2854 *alg
= alg2
, *variant
= negate_variant
;
2857 /* This proves very useful for division-by-constant. */
2858 op_cost
= add_cost
[speed
][mode
];
2859 if (MULT_COST_LESS (&alg
->cost
, mult_cost
))
2861 limit
.cost
= alg
->cost
.cost
- op_cost
;
2862 limit
.latency
= alg
->cost
.latency
- op_cost
;
2866 limit
.cost
= mult_cost
- op_cost
;
2867 limit
.latency
= mult_cost
- op_cost
;
2870 synth_mult (&alg2
, val
- 1, &limit
, mode
);
2871 alg2
.cost
.cost
+= op_cost
;
2872 alg2
.cost
.latency
+= op_cost
;
2873 if (CHEAPER_MULT_COST (&alg2
.cost
, &alg
->cost
))
2874 *alg
= alg2
, *variant
= add_variant
;
2876 return MULT_COST_LESS (&alg
->cost
, mult_cost
);
2879 /* A subroutine of expand_mult, used for constant multiplications.
2880 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2881 convenient. Use the shift/add sequence described by ALG and apply
2882 the final fixup specified by VARIANT. */
2885 expand_mult_const (enum machine_mode mode
, rtx op0
, HOST_WIDE_INT val
,
2886 rtx target
, const struct algorithm
*alg
,
2887 enum mult_variant variant
)
2889 HOST_WIDE_INT val_so_far
;
2890 rtx insn
, accum
, tem
;
2892 enum machine_mode nmode
;
2894 /* Avoid referencing memory over and over and invalid sharing
2896 op0
= force_reg (mode
, op0
);
2898 /* ACCUM starts out either as OP0 or as a zero, depending on
2899 the first operation. */
2901 if (alg
->op
[0] == alg_zero
)
2903 accum
= copy_to_mode_reg (mode
, const0_rtx
);
2906 else if (alg
->op
[0] == alg_m
)
2908 accum
= copy_to_mode_reg (mode
, op0
);
2914 for (opno
= 1; opno
< alg
->ops
; opno
++)
2916 int log
= alg
->log
[opno
];
2917 rtx shift_subtarget
= optimize
? 0 : accum
;
2919 = (opno
== alg
->ops
- 1 && target
!= 0 && variant
!= add_variant
2922 rtx accum_target
= optimize
? 0 : accum
;
2924 switch (alg
->op
[opno
])
2927 tem
= expand_shift (LSHIFT_EXPR
, mode
, accum
,
2928 build_int_cst (NULL_TREE
, log
),
2930 /* REG_EQUAL note will be attached to the following insn. */
2931 emit_move_insn (accum
, tem
);
2936 tem
= expand_shift (LSHIFT_EXPR
, mode
, op0
,
2937 build_int_cst (NULL_TREE
, log
),
2939 accum
= force_operand (gen_rtx_PLUS (mode
, accum
, tem
),
2940 add_target
? add_target
: accum_target
);
2941 val_so_far
+= (HOST_WIDE_INT
) 1 << log
;
2945 tem
= expand_shift (LSHIFT_EXPR
, mode
, op0
,
2946 build_int_cst (NULL_TREE
, log
),
2948 accum
= force_operand (gen_rtx_MINUS (mode
, accum
, tem
),
2949 add_target
? add_target
: accum_target
);
2950 val_so_far
-= (HOST_WIDE_INT
) 1 << log
;
2954 accum
= expand_shift (LSHIFT_EXPR
, mode
, accum
,
2955 build_int_cst (NULL_TREE
, log
),
2958 accum
= force_operand (gen_rtx_PLUS (mode
, accum
, op0
),
2959 add_target
? add_target
: accum_target
);
2960 val_so_far
= (val_so_far
<< log
) + 1;
2964 accum
= expand_shift (LSHIFT_EXPR
, mode
, accum
,
2965 build_int_cst (NULL_TREE
, log
),
2966 shift_subtarget
, 0);
2967 accum
= force_operand (gen_rtx_MINUS (mode
, accum
, op0
),
2968 add_target
? add_target
: accum_target
);
2969 val_so_far
= (val_so_far
<< log
) - 1;
2972 case alg_add_factor
:
2973 tem
= expand_shift (LSHIFT_EXPR
, mode
, accum
,
2974 build_int_cst (NULL_TREE
, log
),
2976 accum
= force_operand (gen_rtx_PLUS (mode
, accum
, tem
),
2977 add_target
? add_target
: accum_target
);
2978 val_so_far
+= val_so_far
<< log
;
2981 case alg_sub_factor
:
2982 tem
= expand_shift (LSHIFT_EXPR
, mode
, accum
,
2983 build_int_cst (NULL_TREE
, log
),
2985 accum
= force_operand (gen_rtx_MINUS (mode
, tem
, accum
),
2987 ? add_target
: (optimize
? 0 : tem
)));
2988 val_so_far
= (val_so_far
<< log
) - val_so_far
;
2995 /* Write a REG_EQUAL note on the last insn so that we can cse
2996 multiplication sequences. Note that if ACCUM is a SUBREG,
2997 we've set the inner register and must properly indicate
3000 tem
= op0
, nmode
= mode
;
3001 if (GET_CODE (accum
) == SUBREG
)
3003 nmode
= GET_MODE (SUBREG_REG (accum
));
3004 tem
= gen_lowpart (nmode
, op0
);
3007 insn
= get_last_insn ();
3008 set_unique_reg_note (insn
, REG_EQUAL
,
3009 gen_rtx_MULT (nmode
, tem
,
3010 GEN_INT (val_so_far
)));
3013 if (variant
== negate_variant
)
3015 val_so_far
= -val_so_far
;
3016 accum
= expand_unop (mode
, neg_optab
, accum
, target
, 0);
3018 else if (variant
== add_variant
)
3020 val_so_far
= val_so_far
+ 1;
3021 accum
= force_operand (gen_rtx_PLUS (mode
, accum
, op0
), target
);
3024 /* Compare only the bits of val and val_so_far that are significant
3025 in the result mode, to avoid sign-/zero-extension confusion. */
3026 val
&= GET_MODE_MASK (mode
);
3027 val_so_far
&= GET_MODE_MASK (mode
);
3028 gcc_assert (val
== val_so_far
);
3033 /* Perform a multiplication and return an rtx for the result.
3034 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3035 TARGET is a suggestion for where to store the result (an rtx).
3037 We check specially for a constant integer as OP1.
3038 If you want this check for OP0 as well, then before calling
3039 you should swap the two operands if OP0 would be constant. */
3042 expand_mult (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3045 enum mult_variant variant
;
3046 struct algorithm algorithm
;
3048 bool speed
= optimize_insn_for_speed_p ();
3050 /* Handling const0_rtx here allows us to use zero as a rogue value for
3052 if (op1
== const0_rtx
)
3054 if (op1
== const1_rtx
)
3056 if (op1
== constm1_rtx
)
3057 return expand_unop (mode
,
3058 GET_MODE_CLASS (mode
) == MODE_INT
3059 && !unsignedp
&& flag_trapv
3060 ? negv_optab
: neg_optab
,
3063 /* These are the operations that are potentially turned into a sequence
3064 of shifts and additions. */
3065 if (SCALAR_INT_MODE_P (mode
)
3066 && (unsignedp
|| !flag_trapv
))
3068 HOST_WIDE_INT coeff
= 0;
3069 rtx fake_reg
= gen_raw_REG (mode
, LAST_VIRTUAL_REGISTER
+ 1);
3071 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3072 less than or equal in size to `unsigned int' this doesn't matter.
3073 If the mode is larger than `unsigned int', then synth_mult works
3074 only if the constant value exactly fits in an `unsigned int' without
3075 any truncation. This means that multiplying by negative values does
3076 not work; results are off by 2^32 on a 32 bit machine. */
3078 if (CONST_INT_P (op1
))
3080 /* Attempt to handle multiplication of DImode values by negative
3081 coefficients, by performing the multiplication by a positive
3082 multiplier and then inverting the result. */
3083 if (INTVAL (op1
) < 0
3084 && GET_MODE_BITSIZE (mode
) > HOST_BITS_PER_WIDE_INT
)
3086 /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3087 result is interpreted as an unsigned coefficient.
3088 Exclude cost of op0 from max_cost to match the cost
3089 calculation of the synth_mult. */
3090 max_cost
= rtx_cost (gen_rtx_MULT (mode
, fake_reg
, op1
), SET
, speed
)
3091 - neg_cost
[speed
][mode
];
3093 && choose_mult_variant (mode
, -INTVAL (op1
), &algorithm
,
3094 &variant
, max_cost
))
3096 rtx temp
= expand_mult_const (mode
, op0
, -INTVAL (op1
),
3097 NULL_RTX
, &algorithm
,
3099 return expand_unop (mode
, neg_optab
, temp
, target
, 0);
3102 else coeff
= INTVAL (op1
);
3104 else if (GET_CODE (op1
) == CONST_DOUBLE
)
3106 /* If we are multiplying in DImode, it may still be a win
3107 to try to work with shifts and adds. */
3108 if (CONST_DOUBLE_HIGH (op1
) == 0
3109 && CONST_DOUBLE_LOW (op1
) > 0)
3110 coeff
= CONST_DOUBLE_LOW (op1
);
3111 else if (CONST_DOUBLE_LOW (op1
) == 0
3112 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1
)))
3114 int shift
= floor_log2 (CONST_DOUBLE_HIGH (op1
))
3115 + HOST_BITS_PER_WIDE_INT
;
3116 return expand_shift (LSHIFT_EXPR
, mode
, op0
,
3117 build_int_cst (NULL_TREE
, shift
),
3122 /* We used to test optimize here, on the grounds that it's better to
3123 produce a smaller program when -O is not used. But this causes
3124 such a terrible slowdown sometimes that it seems better to always
3128 /* Special case powers of two. */
3129 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff
))
3130 return expand_shift (LSHIFT_EXPR
, mode
, op0
,
3131 build_int_cst (NULL_TREE
, floor_log2 (coeff
)),
3134 /* Exclude cost of op0 from max_cost to match the cost
3135 calculation of the synth_mult. */
3136 max_cost
= rtx_cost (gen_rtx_MULT (mode
, fake_reg
, op1
), SET
, speed
);
3137 if (choose_mult_variant (mode
, coeff
, &algorithm
, &variant
,
3139 return expand_mult_const (mode
, op0
, coeff
, target
,
3140 &algorithm
, variant
);
3144 if (GET_CODE (op0
) == CONST_DOUBLE
)
3151 /* Expand x*2.0 as x+x. */
3152 if (GET_CODE (op1
) == CONST_DOUBLE
3153 && SCALAR_FLOAT_MODE_P (mode
))
3156 REAL_VALUE_FROM_CONST_DOUBLE (d
, op1
);
3158 if (REAL_VALUES_EQUAL (d
, dconst2
))
3160 op0
= force_reg (GET_MODE (op0
), op0
);
3161 return expand_binop (mode
, add_optab
, op0
, op0
,
3162 target
, unsignedp
, OPTAB_LIB_WIDEN
);
3166 /* This used to use umul_optab if unsigned, but for non-widening multiply
3167 there is no difference between signed and unsigned. */
3168 op0
= expand_binop (mode
,
3170 && flag_trapv
&& (GET_MODE_CLASS(mode
) == MODE_INT
)
3171 ? smulv_optab
: smul_optab
,
3172 op0
, op1
, target
, unsignedp
, OPTAB_LIB_WIDEN
);
3177 /* Perform a widening multiplication and return an rtx for the result.
3178 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3179 TARGET is a suggestion for where to store the result (an rtx).
3180 THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3181 or smul_widen_optab.
3183 We check specially for a constant integer as OP1, comparing the
3184 cost of a widening multiply against the cost of a sequence of shifts
3188 expand_widening_mult (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3189 int unsignedp
, optab this_optab
)
3191 bool speed
= optimize_insn_for_speed_p ();
3193 if (CONST_INT_P (op1
)
3194 && (INTVAL (op1
) >= 0
3195 || GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
))
3197 HOST_WIDE_INT coeff
= INTVAL (op1
);
3199 enum mult_variant variant
;
3200 struct algorithm algorithm
;
3202 /* Special case powers of two. */
3203 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff
))
3205 op0
= convert_to_mode (mode
, op0
, this_optab
== umul_widen_optab
);
3206 return expand_shift (LSHIFT_EXPR
, mode
, op0
,
3207 build_int_cst (NULL_TREE
, floor_log2 (coeff
)),
3211 /* Exclude cost of op0 from max_cost to match the cost
3212 calculation of the synth_mult. */
3213 max_cost
= mul_widen_cost
[speed
][mode
];
3214 if (choose_mult_variant (mode
, coeff
, &algorithm
, &variant
,
3217 op0
= convert_to_mode (mode
, op0
, this_optab
== umul_widen_optab
);
3218 return expand_mult_const (mode
, op0
, coeff
, target
,
3219 &algorithm
, variant
);
3222 return expand_binop (mode
, this_optab
, op0
, op1
, target
,
3223 unsignedp
, OPTAB_LIB_WIDEN
);
/* Return the smallest n such that 2**n >= X.  */

int
ceil_log2 (unsigned HOST_WIDE_INT x)
{
  return floor_log2 (x - 1) + 1;
}
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose
   multiplier more freely.

   The rounded-up logarithm of D is placed in *lgup_ptr.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */
3251 unsigned HOST_WIDE_INT
3252 choose_multiplier (unsigned HOST_WIDE_INT d
, int n
, int precision
,
3253 rtx
*multiplier_ptr
, int *post_shift_ptr
, int *lgup_ptr
)
3255 HOST_WIDE_INT mhigh_hi
, mlow_hi
;
3256 unsigned HOST_WIDE_INT mhigh_lo
, mlow_lo
;
3257 int lgup
, post_shift
;
3259 unsigned HOST_WIDE_INT nl
, dummy1
;
3260 HOST_WIDE_INT nh
, dummy2
;
3262 /* lgup = ceil(log2(divisor)); */
3263 lgup
= ceil_log2 (d
);
3265 gcc_assert (lgup
<= n
);
3268 pow2
= n
+ lgup
- precision
;
3270 /* We could handle this with some effort, but this case is much
3271 better handled directly with a scc insn, so rely on caller using
3273 gcc_assert (pow
!= 2 * HOST_BITS_PER_WIDE_INT
);
3275 /* mlow = 2^(N + lgup)/d */
3276 if (pow
>= HOST_BITS_PER_WIDE_INT
)
3278 nh
= (HOST_WIDE_INT
) 1 << (pow
- HOST_BITS_PER_WIDE_INT
);
3284 nl
= (unsigned HOST_WIDE_INT
) 1 << pow
;
3286 div_and_round_double (TRUNC_DIV_EXPR
, 1, nl
, nh
, d
, (HOST_WIDE_INT
) 0,
3287 &mlow_lo
, &mlow_hi
, &dummy1
, &dummy2
);
3289 /* mhigh = (2^(N + lgup) + 2^N + lgup - precision)/d */
3290 if (pow2
>= HOST_BITS_PER_WIDE_INT
)
3291 nh
|= (HOST_WIDE_INT
) 1 << (pow2
- HOST_BITS_PER_WIDE_INT
);
3293 nl
|= (unsigned HOST_WIDE_INT
) 1 << pow2
;
3294 div_and_round_double (TRUNC_DIV_EXPR
, 1, nl
, nh
, d
, (HOST_WIDE_INT
) 0,
3295 &mhigh_lo
, &mhigh_hi
, &dummy1
, &dummy2
);
3297 gcc_assert (!mhigh_hi
|| nh
- d
< d
);
3298 gcc_assert (mhigh_hi
<= 1 && mlow_hi
<= 1);
3299 /* Assert that mlow < mhigh. */
3300 gcc_assert (mlow_hi
< mhigh_hi
3301 || (mlow_hi
== mhigh_hi
&& mlow_lo
< mhigh_lo
));
3303 /* If precision == N, then mlow, mhigh exceed 2^N
3304 (but they do not exceed 2^(N+1)). */
3306 /* Reduce to lowest terms. */
3307 for (post_shift
= lgup
; post_shift
> 0; post_shift
--)
3309 unsigned HOST_WIDE_INT ml_lo
= (mlow_hi
<< (HOST_BITS_PER_WIDE_INT
- 1)) | (mlow_lo
>> 1);
3310 unsigned HOST_WIDE_INT mh_lo
= (mhigh_hi
<< (HOST_BITS_PER_WIDE_INT
- 1)) | (mhigh_lo
>> 1);
3320 *post_shift_ptr
= post_shift
;
3322 if (n
< HOST_BITS_PER_WIDE_INT
)
3324 unsigned HOST_WIDE_INT mask
= ((unsigned HOST_WIDE_INT
) 1 << n
) - 1;
3325 *multiplier_ptr
= GEN_INT (mhigh_lo
& mask
);
3326 return mhigh_lo
>= mask
;
3330 *multiplier_ptr
= GEN_INT (mhigh_lo
);
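/* Illustrative sketch, not part of GCC: using a choose_multiplier-style
   magic constant to replace an unsigned division by 3.  0xAAAAAAAB is
   ceil(2^33 / 3), precise enough that x / 3 == (x * 0xAAAAAAAB) >> 33 for
   every 32-bit x.  Assumes a 32-bit 'unsigned int' and a 64-bit
   'unsigned long long'; the helper name is hypothetical.  */

static unsigned int
udiv_by_3_sketch (unsigned int x)
{
  unsigned long long prod = (unsigned long long) x * 0xAAAAAAABULL;
  return (unsigned int) (prod >> 33);
}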
/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */

static unsigned HOST_WIDE_INT
invert_mod2n (unsigned HOST_WIDE_INT x, int n)
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;
  int nbit = 3;

  mask = (n == HOST_BITS_PER_WIDE_INT
	  ? ~(unsigned HOST_WIDE_INT) 0
	  : ((unsigned HOST_WIDE_INT) 1 << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;		/* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
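/* Illustrative sketch, not part of GCC: the same Newton-style iteration on
   a fixed 32-bit type.  Starting from y = x (correct to 3 bits for odd x),
   each step y = y * (2 - x*y) doubles the number of correct low-order bits,
   so four steps already give the full 32-bit inverse.  Assumes a 32-bit
   'unsigned int' and odd X; the helper name is hypothetical.  */

static unsigned int
invert_mod2_32_sketch (unsigned int x)
{
  unsigned int y = x;		/* correct mod 2^3 */
  y *= 2 - x * y;		/* correct mod 2^6 */
  y *= 2 - x * y;		/* correct mod 2^12 */
  y *= 2 - x * y;		/* correct mod 2^24 */
  y *= 2 - x * y;		/* correct mod 2^48 >= 2^32 */
  return y;
}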
/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
   flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.

   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */

static rtx
expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
			     rtx op1, rtx target, int unsignedp)
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
		      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
		      NULL_RTX, 0);
  tem = expand_and (mode, tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
		     adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
		      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
		      NULL_RTX, 0);
  tem = expand_and (mode, tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
			  target);

  return target;
}
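/* Illustrative sketch, not part of GCC: the adjustment above expressed on
   32-bit values.  If HIGH_U is the high word of the unsigned 32x32->64
   product of A and B, the high word of the signed product is
   HIGH_U - (A < 0 ? B : 0) - (B < 0 ? A : 0); with additions instead of
   subtractions the relation runs the other way.  Assumes 32-bit types,
   two's complement and arithmetic '>>' of int; the helper name is
   hypothetical.  */

static unsigned int
signed_highpart_from_unsigned_sketch (unsigned int high_u,
				      unsigned int a, unsigned int b)
{
  /* (int) a >> 31 is all ones exactly when A is negative; ANDing with B
     selects B or 0, just like the expand_shift/expand_and pair above.  */
  unsigned int adj0 = (unsigned int) ((int) a >> 31) & b;
  unsigned int adj1 = (unsigned int) ((int) b >> 31) & a;
  return high_u - adj0 - adj1;
}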
/* Subroutine of expand_mult_highpart.  Return the MODE high part of OP.  */

static rtx
extract_high_half (enum machine_mode mode, rtx op)
{
  enum machine_mode wider_mode;

  if (mode == word_mode)
    return gen_highpart (mode, op);

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  op = expand_shift (RSHIFT_EXPR, wider_mode, op,
		     build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
  return convert_modes (mode, wider_mode, op, 0);
}
3416 /* Like expand_mult_highpart, but only consider using a multiplication
3417 optab. OP1 is an rtx for the constant operand. */
3420 expand_mult_highpart_optab (enum machine_mode mode
, rtx op0
, rtx op1
,
3421 rtx target
, int unsignedp
, int max_cost
)
3423 rtx narrow_op1
= gen_int_mode (INTVAL (op1
), mode
);
3424 enum machine_mode wider_mode
;
3428 bool speed
= optimize_insn_for_speed_p ();
3430 gcc_assert (!SCALAR_FLOAT_MODE_P (mode
));
3432 wider_mode
= GET_MODE_WIDER_MODE (mode
);
3433 size
= GET_MODE_BITSIZE (mode
);
3435 /* Firstly, try using a multiplication insn that only generates the needed
3436 high part of the product, and in the sign flavor of unsignedp. */
3437 if (mul_highpart_cost
[speed
][mode
] < max_cost
)
3439 moptab
= unsignedp
? umul_highpart_optab
: smul_highpart_optab
;
3440 tem
= expand_binop (mode
, moptab
, op0
, narrow_op1
, target
,
3441 unsignedp
, OPTAB_DIRECT
);
3446 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3447 Need to adjust the result after the multiplication. */
3448 if (size
- 1 < BITS_PER_WORD
3449 && (mul_highpart_cost
[speed
][mode
] + 2 * shift_cost
[speed
][mode
][size
-1]
3450 + 4 * add_cost
[speed
][mode
] < max_cost
))
3452 moptab
= unsignedp
? smul_highpart_optab
: umul_highpart_optab
;
3453 tem
= expand_binop (mode
, moptab
, op0
, narrow_op1
, target
,
3454 unsignedp
, OPTAB_DIRECT
);
3456 /* We used the wrong signedness. Adjust the result. */
3457 return expand_mult_highpart_adjust (mode
, tem
, op0
, narrow_op1
,
3461 /* Try widening multiplication. */
3462 moptab
= unsignedp
? umul_widen_optab
: smul_widen_optab
;
3463 if (optab_handler (moptab
, wider_mode
) != CODE_FOR_nothing
3464 && mul_widen_cost
[speed
][wider_mode
] < max_cost
)
3466 tem
= expand_binop (wider_mode
, moptab
, op0
, narrow_op1
, 0,
3467 unsignedp
, OPTAB_WIDEN
);
3469 return extract_high_half (mode
, tem
);
3472 /* Try widening the mode and perform a non-widening multiplication. */
3473 if (optab_handler (smul_optab
, wider_mode
) != CODE_FOR_nothing
3474 && size
- 1 < BITS_PER_WORD
3475 && mul_cost
[speed
][wider_mode
] + shift_cost
[speed
][mode
][size
-1] < max_cost
)
3477 rtx insns
, wop0
, wop1
;
3479 /* We need to widen the operands, for example to ensure the
3480 constant multiplier is correctly sign or zero extended.
3481 Use a sequence to clean-up any instructions emitted by
3482 the conversions if things don't work out. */
3484 wop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
3485 wop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
3486 tem
= expand_binop (wider_mode
, smul_optab
, wop0
, wop1
, 0,
3487 unsignedp
, OPTAB_WIDEN
);
3488 insns
= get_insns ();
3494 return extract_high_half (mode
, tem
);
3498 /* Try widening multiplication of opposite signedness, and adjust. */
3499 moptab
= unsignedp
? smul_widen_optab
: umul_widen_optab
;
3500 if (optab_handler (moptab
, wider_mode
) != CODE_FOR_nothing
3501 && size
- 1 < BITS_PER_WORD
3502 && (mul_widen_cost
[speed
][wider_mode
] + 2 * shift_cost
[speed
][mode
][size
-1]
3503 + 4 * add_cost
[speed
][mode
] < max_cost
))
3505 tem
= expand_binop (wider_mode
, moptab
, op0
, narrow_op1
,
3506 NULL_RTX
, ! unsignedp
, OPTAB_WIDEN
);
3509 tem
= extract_high_half (mode
, tem
);
3510 /* We used the wrong signedness. Adjust the result. */
3511 return expand_mult_highpart_adjust (mode
, tem
, op0
, narrow_op1
,
3519 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3520 putting the high half of the result in TARGET if that is convenient,
3521 and return where the result is. If the operation can not be performed,
3524 MODE is the mode of operation and result.
3526 UNSIGNEDP nonzero means unsigned multiply.
3528 MAX_COST is the total allowed cost for the expanded RTL. */
3531 expand_mult_highpart (enum machine_mode mode
, rtx op0
, rtx op1
,
3532 rtx target
, int unsignedp
, int max_cost
)
3534 enum machine_mode wider_mode
= GET_MODE_WIDER_MODE (mode
);
3535 unsigned HOST_WIDE_INT cnst1
;
3537 bool sign_adjust
= false;
3538 enum mult_variant variant
;
3539 struct algorithm alg
;
3541 bool speed
= optimize_insn_for_speed_p ();
3543 gcc_assert (!SCALAR_FLOAT_MODE_P (mode
));
3544 /* We can't support modes wider than HOST_BITS_PER_INT. */
3545 gcc_assert (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
);
3547 cnst1
= INTVAL (op1
) & GET_MODE_MASK (mode
);
3549 /* We can't optimize modes wider than BITS_PER_WORD.
3550 ??? We might be able to perform double-word arithmetic if
3551 mode == word_mode, however all the cost calculations in
3552 synth_mult etc. assume single-word operations. */
3553 if (GET_MODE_BITSIZE (wider_mode
) > BITS_PER_WORD
)
3554 return expand_mult_highpart_optab (mode
, op0
, op1
, target
,
3555 unsignedp
, max_cost
);
3557 extra_cost
= shift_cost
[speed
][mode
][GET_MODE_BITSIZE (mode
) - 1];
3559 /* Check whether we try to multiply by a negative constant. */
3560 if (!unsignedp
&& ((cnst1
>> (GET_MODE_BITSIZE (mode
) - 1)) & 1))
3563 extra_cost
+= add_cost
[speed
][mode
];
3566 /* See whether shift/add multiplication is cheap enough. */
3567 if (choose_mult_variant (wider_mode
, cnst1
, &alg
, &variant
,
3568 max_cost
- extra_cost
))
3570 /* See whether the specialized multiplication optabs are
3571 cheaper than the shift/add version. */
3572 tem
= expand_mult_highpart_optab (mode
, op0
, op1
, target
, unsignedp
,
3573 alg
.cost
.cost
+ extra_cost
);
3577 tem
= convert_to_mode (wider_mode
, op0
, unsignedp
);
3578 tem
= expand_mult_const (wider_mode
, tem
, cnst1
, 0, &alg
, variant
);
3579 tem
= extract_high_half (mode
, tem
);
3581 /* Adjust result for signedness. */
3583 tem
= force_operand (gen_rtx_MINUS (mode
, tem
, op0
), tem
);
3587 return expand_mult_highpart_optab (mode
, op0
, op1
, target
,
3588 unsignedp
, max_cost
);
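/* Illustrative sketch, not part of GCC: the widening-multiply route taken
   by expand_mult_highpart_optab, on plain C types: do the multiplication
   in a mode twice as wide and keep only the upper half.  Assumes a 32-bit
   'unsigned int' and a 64-bit 'unsigned long long'; the helper name is
   hypothetical.  */

static unsigned int
umul_highpart_sketch (unsigned int a, unsigned int b)
{
  unsigned long long prod = (unsigned long long) a * b;
  return (unsigned int) (prod >> 32);
}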
3592 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3595 expand_smod_pow2 (enum machine_mode mode
, rtx op0
, HOST_WIDE_INT d
)
3597 unsigned HOST_WIDE_INT masklow
, maskhigh
;
3598 rtx result
, temp
, shift
, label
;
3601 logd
= floor_log2 (d
);
3602 result
= gen_reg_rtx (mode
);
3604 /* Avoid conditional branches when they're expensive. */
3605 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3606 && optimize_insn_for_speed_p ())
3608 rtx signmask
= emit_store_flag (result
, LT
, op0
, const0_rtx
,
3612 signmask
= force_reg (mode
, signmask
);
3613 masklow
= ((HOST_WIDE_INT
) 1 << logd
) - 1;
3614 shift
= GEN_INT (GET_MODE_BITSIZE (mode
) - logd
);
3616 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3617 which instruction sequence to use. If logical right shifts
3618 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3619 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3621 temp
= gen_rtx_LSHIFTRT (mode
, result
, shift
);
3622 if (optab_handler (lshr_optab
, mode
) == CODE_FOR_nothing
3623 || rtx_cost (temp
, SET
, optimize_insn_for_speed_p ()) > COSTS_N_INSNS (2))
3625 temp
= expand_binop (mode
, xor_optab
, op0
, signmask
,
3626 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3627 temp
= expand_binop (mode
, sub_optab
, temp
, signmask
,
3628 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3629 temp
= expand_binop (mode
, and_optab
, temp
, GEN_INT (masklow
),
3630 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3631 temp
= expand_binop (mode
, xor_optab
, temp
, signmask
,
3632 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3633 temp
= expand_binop (mode
, sub_optab
, temp
, signmask
,
3634 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3638 signmask
= expand_binop (mode
, lshr_optab
, signmask
, shift
,
3639 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3640 signmask
= force_reg (mode
, signmask
);
3642 temp
= expand_binop (mode
, add_optab
, op0
, signmask
,
3643 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3644 temp
= expand_binop (mode
, and_optab
, temp
, GEN_INT (masklow
),
3645 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3646 temp
= expand_binop (mode
, sub_optab
, temp
, signmask
,
3647 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3653 /* Mask contains the mode's signbit and the significant bits of the
3654 modulus. By including the signbit in the operation, many targets
3655 can avoid an explicit compare operation in the following comparison
3658 masklow
= ((HOST_WIDE_INT
) 1 << logd
) - 1;
3659 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
3661 masklow
|= (HOST_WIDE_INT
) -1 << (GET_MODE_BITSIZE (mode
) - 1);
3665 maskhigh
= (HOST_WIDE_INT
) -1
3666 << (GET_MODE_BITSIZE (mode
) - HOST_BITS_PER_WIDE_INT
- 1);
3668 temp
= expand_binop (mode
, and_optab
, op0
,
3669 immed_double_const (masklow
, maskhigh
, mode
),
3670 result
, 1, OPTAB_LIB_WIDEN
);
3672 emit_move_insn (result
, temp
);
3674 label
= gen_label_rtx ();
3675 do_cmp_and_jump (result
, const0_rtx
, GE
, mode
, label
);
3677 temp
= expand_binop (mode
, sub_optab
, result
, const1_rtx
, result
,
3678 0, OPTAB_LIB_WIDEN
);
3679 masklow
= (HOST_WIDE_INT
) -1 << logd
;
3681 temp
= expand_binop (mode
, ior_optab
, temp
,
3682 immed_double_const (masklow
, maskhigh
, mode
),
3683 result
, 1, OPTAB_LIB_WIDEN
);
3684 temp
= expand_binop (mode
, add_optab
, temp
, const1_rtx
, result
,
3685 0, OPTAB_LIB_WIDEN
);
3687 emit_move_insn (result
, temp
);
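/* Illustrative sketch, not part of GCC: the branch-free remainder sequence
   used above when branches are expensive, for x % 4 on a 32-bit int.
   SIGNMASK is all ones for negative X; logically shifted it becomes the
   bias D-1, which is added before the AND and subtracted afterwards so the
   result carries the sign of X, as C's truncating '%' requires.  Assumes
   32-bit int, two's complement, arithmetic '>>' of int; the helper name is
   hypothetical.  */

static int
smod_by_4_sketch (int x)
{
  unsigned int signmask = (unsigned int) (x >> 31);	/* 0 or 0xffffffff */
  unsigned int bias = signmask >> (32 - 2);		/* 0 or 3 (= 4 - 1) */
  return (int) ((((unsigned int) x + bias) & 3u) - bias);
}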
3692 /* Expand signed division of OP0 by a power of two D in mode MODE.
3693 This routine is only called for positive values of D. */
static rtx
expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx temp, label;
  tree shift;
  int logd;

  logd = floor_log2 (d);
  shift = build_int_cst (NULL_TREE, logd);

  if (d == 2
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 1)
    {
      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
    }

#ifdef HAVE_conditional_move
  if (BRANCH_COST (optimize_insn_for_speed_p (), false)
      >= 2)
    {
      rtx temp2;

      /* ??? emit_conditional_move forces a stack adjustment via
         compare_from_rtx so, if the sequence is discarded, it will
         be lost.  Do it now instead.  */
      do_pending_stack_adjust ();

      start_sequence ();
      temp2 = copy_to_mode_reg (mode, op0);
      temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
                           NULL_RTX, 0, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);

      /* Construct "temp2 = (temp2 < 0) ? temp : temp2".  */
      temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
                                     mode, temp, temp2, mode, 0);
      if (temp2)
        {
          rtx seq = get_insns ();
          end_sequence ();
          emit_insn (seq);
          return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
        }
      end_sequence ();
    }
#endif

  if (BRANCH_COST (optimize_insn_for_speed_p (),
                   false) >= 2)
    {
      int ushift = GET_MODE_BITSIZE (mode) - logd;

      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
      if (shift_cost[optimize_insn_for_speed_p ()][mode][ushift] > COSTS_N_INSNS (1))
        temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
                             NULL_RTX, 0, OPTAB_LIB_WIDEN);
      else
        temp = expand_shift (RSHIFT_EXPR, mode, temp,
                             build_int_cst (NULL_TREE, ushift),
                             NULL_RTX, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
    }

  label = gen_label_rtx ();
  temp = copy_to_mode_reg (mode, op0);
  do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
  expand_inc (temp, GEN_INT (d - 1));
  emit_label (label);
  return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
}
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   left for later.  */

/* ??? For modulo, we don't actually need the highpart of the first product,
   the low part will do nicely.  And for small divisors, the second multiply
   can also be a low-part only multiply or even be completely left out.
   E.g. to calculate the remainder of a division by 3 with a 32 bit
   multiply, multiply with 0x55555556 and extract the upper two bits;
   the result is exact for inputs up to 0x1fffffff.
   The input range can be reduced by using cross-sum rules.
   For odd divisors >= 3, the following table gives right shift counts
   so that if a number is shifted by an integer multiple of the given
   amount, the remainder stays the same:
   2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
   14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
   0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
   20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
   0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12

   Cross-sum rules for even numbers can be derived by leaving as many bits
   to the right alone as the divisor has zeros to the right.
   E.g. if x is an unsigned 32 bit number:
   (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
   */
rtx
expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
               rtx op0, rtx op1, rtx target, int unsignedp)
{
  enum machine_mode compute_mode;
  rtx tquotient;
  rtx quotient = 0, remainder = 0;
  rtx last;
  int size;
  rtx insn, set;
  optab optab1, optab2;
  int op1_is_constant, op1_is_pow2 = 0;
  int max_cost, extra_cost;
  static HOST_WIDE_INT last_div_const = 0;
  static HOST_WIDE_INT ext_op1;
  bool speed = optimize_insn_for_speed_p ();

  op1_is_constant = CONST_INT_P (op1);
  if (op1_is_constant)
    {
      ext_op1 = INTVAL (op1);
      if (unsignedp)
        ext_op1 &= GET_MODE_MASK (mode);
      op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
                      || (! unsignedp
                          && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
    }
  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding mode.
     For some special operands this code emits all RTL for the desired
     operation, for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies
     OP0 by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expand_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */
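  /* Illustrative sketch (not part of GCC; the helper names and the specific
     divisor are mine): for an unsigned 32-bit division by the constant 3,
     one suitable fixed-point approximation of 1/3 is the multiplier
     0xAAAAAAAB = ceil(2^33 / 3), and the quotient is the high part of a
     widening multiply:

         unsigned int udiv3_sketch (unsigned int x)
         {
           return (unsigned int) ((x * 0xAAAAAAABULL) >> 33);
         }

         unsigned int umod3_sketch (unsigned int x)
         {
           // remainder by a second by-constant multiply and a subtraction
           return x - 3 * udiv3_sketch (x);
         }

     choose_multiplier and expand_mult_highpart below generalize this to
     arbitrary divisors, adding pre/post shifts and an adjustment addition
     when the multiplier does not fit in a word.  */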
  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;

  /* When dividing by -1, we could get an overflow.
     negv_optab can handle overflows.  */
  if (! unsignedp && op1 == constm1_rtx)
    return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
                        ? negv_optab : neg_optab, op0, target, 0);
  if (target
      /* Don't use the function value register as a target
         since we have to read it as well as write it,
         and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
          /* Don't clobber an operand while doing a multi-step calculation.  */
          || ((rem_flag || op1_is_constant)
              && (reg_mentioned_p (target, op0)
                  || (MEM_P (op0) && MEM_P (target))))
          || reg_mentioned_p (target, op1)
          || (MEM_P (op1) && MEM_P (target))))
    target = 0;
  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions that modxx3 insns do, and that these insns don't
     normally fail.  If these assumptions are not correct, we may generate
     less efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expand_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */
  optab1 = ((op1_is_pow2 && op1 != const0_rtx)
            ? (unsignedp ? lshr_optab : ashr_optab)
            : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = ((op1_is_pow2 && op1 != const0_rtx)
            ? optab1
            : (unsignedp ? udivmod_optab : sdivmod_optab));
  for (compute_mode = mode; compute_mode != VOIDmode;
       compute_mode = GET_MODE_WIDER_MODE (compute_mode))
    if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
        || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    for (compute_mode = mode; compute_mode != VOIDmode;
         compute_mode = GET_MODE_WIDER_MODE (compute_mode))
      if (optab_libfunc (optab1, compute_mode)
          || optab_libfunc (optab2, compute_mode))
        break;

  /* If we still couldn't find a mode, use MODE, but expand_binop will
     probably die anyway.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;
3942 if (target
&& GET_MODE (target
) == compute_mode
)
3945 tquotient
= gen_reg_rtx (compute_mode
);
3947 size
= GET_MODE_BITSIZE (compute_mode
);
3949 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3950 (mode), and thereby get better code when OP1 is a constant. Do that
3951 later. It will require going over all usages of SIZE below. */
3952 size
= GET_MODE_BITSIZE (mode
);
3955 /* Only deduct something for a REM if the last divide done was
3956 for a different constant. Then set the constant of the last
3958 max_cost
= unsignedp
? udiv_cost
[speed
][compute_mode
] : sdiv_cost
[speed
][compute_mode
];
3959 if (rem_flag
&& ! (last_div_const
!= 0 && op1_is_constant
3960 && INTVAL (op1
) == last_div_const
))
3961 max_cost
-= mul_cost
[speed
][compute_mode
] + add_cost
[speed
][compute_mode
];
3963 last_div_const
= ! rem_flag
&& op1_is_constant
? INTVAL (op1
) : 0;
3965 /* Now convert to the best mode to use. */
3966 if (compute_mode
!= mode
)
3968 op0
= convert_modes (compute_mode
, mode
, op0
, unsignedp
);
3969 op1
= convert_modes (compute_mode
, mode
, op1
, unsignedp
);
3971 /* convert_modes may have placed op1 into a register, so we
3972 must recompute the following. */
3973 op1_is_constant
= CONST_INT_P (op1
);
3974 op1_is_pow2
= (op1_is_constant
3975 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1
))
3977 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1
)))))) ;
3980 /* If one of the operands is a volatile MEM, copy it into a register. */
3982 if (MEM_P (op0
) && MEM_VOLATILE_P (op0
))
3983 op0
= force_reg (compute_mode
, op0
);
3984 if (MEM_P (op1
) && MEM_VOLATILE_P (op1
))
3985 op1
= force_reg (compute_mode
, op1
);
3987 /* If we need the remainder or if OP1 is constant, we need to
3988 put OP0 in a register in case it has any queued subexpressions. */
3989 if (rem_flag
|| op1_is_constant
)
3990 op0
= force_reg (compute_mode
, op0
);
3992 last
= get_last_insn ();
3994 /* Promote floor rounding to trunc rounding for unsigned operations. */
3997 if (code
== FLOOR_DIV_EXPR
)
3998 code
= TRUNC_DIV_EXPR
;
3999 if (code
== FLOOR_MOD_EXPR
)
4000 code
= TRUNC_MOD_EXPR
;
4001 if (code
== EXACT_DIV_EXPR
&& op1_is_pow2
)
4002 code
= TRUNC_DIV_EXPR
;
4005 if (op1
!= const0_rtx
)
4008 case TRUNC_MOD_EXPR
:
4009 case TRUNC_DIV_EXPR
:
4010 if (op1_is_constant
)
4014 unsigned HOST_WIDE_INT mh
;
4015 int pre_shift
, post_shift
;
4018 unsigned HOST_WIDE_INT d
= (INTVAL (op1
)
4019 & GET_MODE_MASK (compute_mode
));
4021 if (EXACT_POWER_OF_2_OR_ZERO_P (d
))
4023 pre_shift
= floor_log2 (d
);
4027 = expand_binop (compute_mode
, and_optab
, op0
,
4028 GEN_INT (((HOST_WIDE_INT
) 1 << pre_shift
) - 1),
4032 return gen_lowpart (mode
, remainder
);
4034 quotient
= expand_shift (RSHIFT_EXPR
, compute_mode
, op0
,
4035 build_int_cst (NULL_TREE
,
4039 else if (size
<= HOST_BITS_PER_WIDE_INT
)
4041 if (d
>= ((unsigned HOST_WIDE_INT
) 1 << (size
- 1)))
4043 /* Most significant bit of divisor is set; emit an scc
4045 quotient
= emit_store_flag_force (tquotient
, GEU
, op0
, op1
,
4046 compute_mode
, 1, 1);
4050 /* Find a suitable multiplier and right shift count
4051 instead of multiplying with D. */
4053 mh
= choose_multiplier (d
, size
, size
,
4054 &ml
, &post_shift
, &dummy
);
4056 /* If the suggested multiplier is more than SIZE bits,
4057 we can do better for even divisors, using an
4058 initial right shift. */
4059 if (mh
!= 0 && (d
& 1) == 0)
4061 pre_shift
= floor_log2 (d
& -d
);
4062 mh
= choose_multiplier (d
>> pre_shift
, size
,
4064 &ml
, &post_shift
, &dummy
);
4074 if (post_shift
- 1 >= BITS_PER_WORD
)
4078 = (shift_cost
[speed
][compute_mode
][post_shift
- 1]
4079 + shift_cost
[speed
][compute_mode
][1]
4080 + 2 * add_cost
[speed
][compute_mode
]);
4081 t1
= expand_mult_highpart (compute_mode
, op0
, ml
,
4083 max_cost
- extra_cost
);
4086 t2
= force_operand (gen_rtx_MINUS (compute_mode
,
4089 t3
= expand_shift (RSHIFT_EXPR
, compute_mode
, t2
,
4090 integer_one_node
, NULL_RTX
, 1);
4091 t4
= force_operand (gen_rtx_PLUS (compute_mode
,
4094 quotient
= expand_shift
4095 (RSHIFT_EXPR
, compute_mode
, t4
,
4096 build_int_cst (NULL_TREE
, post_shift
- 1),
4103 if (pre_shift
>= BITS_PER_WORD
4104 || post_shift
>= BITS_PER_WORD
)
4108 (RSHIFT_EXPR
, compute_mode
, op0
,
4109 build_int_cst (NULL_TREE
, pre_shift
),
4112 = (shift_cost
[speed
][compute_mode
][pre_shift
]
4113 + shift_cost
[speed
][compute_mode
][post_shift
]);
4114 t2
= expand_mult_highpart (compute_mode
, t1
, ml
,
4116 max_cost
- extra_cost
);
4119 quotient
= expand_shift
4120 (RSHIFT_EXPR
, compute_mode
, t2
,
4121 build_int_cst (NULL_TREE
, post_shift
),
4126 else /* Too wide mode to use tricky code */
4129 insn
= get_last_insn ();
4131 && (set
= single_set (insn
)) != 0
4132 && SET_DEST (set
) == quotient
)
4133 set_unique_reg_note (insn
,
4135 gen_rtx_UDIV (compute_mode
, op0
, op1
));
4137 else /* TRUNC_DIV, signed */
4139 unsigned HOST_WIDE_INT ml
;
4140 int lgup
, post_shift
;
4142 HOST_WIDE_INT d
= INTVAL (op1
);
4143 unsigned HOST_WIDE_INT abs_d
;
4145 /* Since d might be INT_MIN, we have to cast to
4146 unsigned HOST_WIDE_INT before negating to avoid
4147 undefined signed overflow. */
4149 ? (unsigned HOST_WIDE_INT
) d
4150 : - (unsigned HOST_WIDE_INT
) d
);
4152 /* n rem d = n rem -d */
4153 if (rem_flag
&& d
< 0)
4156 op1
= gen_int_mode (abs_d
, compute_mode
);
4162 quotient
= expand_unop (compute_mode
, neg_optab
, op0
,
4164 else if (HOST_BITS_PER_WIDE_INT
>= size
4165 && abs_d
== (unsigned HOST_WIDE_INT
) 1 << (size
- 1))
4167 /* This case is not handled correctly below. */
4168 quotient
= emit_store_flag (tquotient
, EQ
, op0
, op1
,
4169 compute_mode
, 1, 1);
4173 else if (EXACT_POWER_OF_2_OR_ZERO_P (d
)
4174 && (rem_flag
? smod_pow2_cheap
[speed
][compute_mode
]
4175 : sdiv_pow2_cheap
[speed
][compute_mode
])
4176 /* We assume that cheap metric is true if the
4177 optab has an expander for this mode. */
4178 && ((optab_handler ((rem_flag
? smod_optab
4181 != CODE_FOR_nothing
)
4182 || (optab_handler (sdivmod_optab
,
4184 != CODE_FOR_nothing
)))
4186 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d
))
4190 remainder
= expand_smod_pow2 (compute_mode
, op0
, d
);
4192 return gen_lowpart (mode
, remainder
);
4195 if (sdiv_pow2_cheap
[speed
][compute_mode
]
4196 && ((optab_handler (sdiv_optab
, compute_mode
)
4197 != CODE_FOR_nothing
)
4198 || (optab_handler (sdivmod_optab
, compute_mode
)
4199 != CODE_FOR_nothing
)))
4200 quotient
= expand_divmod (0, TRUNC_DIV_EXPR
,
4202 gen_int_mode (abs_d
,
4206 quotient
= expand_sdiv_pow2 (compute_mode
, op0
, abs_d
);
4208 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4209 negate the quotient. */
4212 insn
= get_last_insn ();
4214 && (set
= single_set (insn
)) != 0
4215 && SET_DEST (set
) == quotient
4216 && abs_d
< ((unsigned HOST_WIDE_INT
) 1
4217 << (HOST_BITS_PER_WIDE_INT
- 1)))
4218 set_unique_reg_note (insn
,
4220 gen_rtx_DIV (compute_mode
,
4227 quotient
= expand_unop (compute_mode
, neg_optab
,
4228 quotient
, quotient
, 0);
4231 else if (size
<= HOST_BITS_PER_WIDE_INT
)
4233 choose_multiplier (abs_d
, size
, size
- 1,
4234 &mlr
, &post_shift
, &lgup
);
4235 ml
= (unsigned HOST_WIDE_INT
) INTVAL (mlr
);
4236 if (ml
< (unsigned HOST_WIDE_INT
) 1 << (size
- 1))
4240 if (post_shift
>= BITS_PER_WORD
4241 || size
- 1 >= BITS_PER_WORD
)
4244 extra_cost
= (shift_cost
[speed
][compute_mode
][post_shift
]
4245 + shift_cost
[speed
][compute_mode
][size
- 1]
4246 + add_cost
[speed
][compute_mode
]);
4247 t1
= expand_mult_highpart (compute_mode
, op0
, mlr
,
4249 max_cost
- extra_cost
);
4253 (RSHIFT_EXPR
, compute_mode
, t1
,
4254 build_int_cst (NULL_TREE
, post_shift
),
4257 (RSHIFT_EXPR
, compute_mode
, op0
,
4258 build_int_cst (NULL_TREE
, size
- 1),
4262 = force_operand (gen_rtx_MINUS (compute_mode
,
4267 = force_operand (gen_rtx_MINUS (compute_mode
,
4275 if (post_shift
>= BITS_PER_WORD
4276 || size
- 1 >= BITS_PER_WORD
)
4279 ml
|= (~(unsigned HOST_WIDE_INT
) 0) << (size
- 1);
4280 mlr
= gen_int_mode (ml
, compute_mode
);
4281 extra_cost
= (shift_cost
[speed
][compute_mode
][post_shift
]
4282 + shift_cost
[speed
][compute_mode
][size
- 1]
4283 + 2 * add_cost
[speed
][compute_mode
]);
4284 t1
= expand_mult_highpart (compute_mode
, op0
, mlr
,
4286 max_cost
- extra_cost
);
4289 t2
= force_operand (gen_rtx_PLUS (compute_mode
,
4293 (RSHIFT_EXPR
, compute_mode
, t2
,
4294 build_int_cst (NULL_TREE
, post_shift
),
4297 (RSHIFT_EXPR
, compute_mode
, op0
,
4298 build_int_cst (NULL_TREE
, size
- 1),
4302 = force_operand (gen_rtx_MINUS (compute_mode
,
4307 = force_operand (gen_rtx_MINUS (compute_mode
,
4312 else /* Too wide mode to use tricky code */
4315 insn
= get_last_insn ();
4317 && (set
= single_set (insn
)) != 0
4318 && SET_DEST (set
) == quotient
)
4319 set_unique_reg_note (insn
,
4321 gen_rtx_DIV (compute_mode
, op0
, op1
));
4326 delete_insns_since (last
);
4329 case FLOOR_DIV_EXPR
:
4330 case FLOOR_MOD_EXPR
:
4331 /* We will come here only for signed operations. */
4332 if (op1_is_constant
&& HOST_BITS_PER_WIDE_INT
>= size
)
4334 unsigned HOST_WIDE_INT mh
;
4335 int pre_shift
, lgup
, post_shift
;
4336 HOST_WIDE_INT d
= INTVAL (op1
);
4341 /* We could just as easily deal with negative constants here,
4342 but it does not seem worth the trouble for GCC 2.6. */
4343 if (EXACT_POWER_OF_2_OR_ZERO_P (d
))
4345 pre_shift
= floor_log2 (d
);
4348 remainder
= expand_binop (compute_mode
, and_optab
, op0
,
4349 GEN_INT (((HOST_WIDE_INT
) 1 << pre_shift
) - 1),
4350 remainder
, 0, OPTAB_LIB_WIDEN
);
4352 return gen_lowpart (mode
, remainder
);
4354 quotient
= expand_shift
4355 (RSHIFT_EXPR
, compute_mode
, op0
,
4356 build_int_cst (NULL_TREE
, pre_shift
),
4363 mh
= choose_multiplier (d
, size
, size
- 1,
4364 &ml
, &post_shift
, &lgup
);
4367 if (post_shift
< BITS_PER_WORD
4368 && size
- 1 < BITS_PER_WORD
)
4371 (RSHIFT_EXPR
, compute_mode
, op0
,
4372 build_int_cst (NULL_TREE
, size
- 1),
4374 t2
= expand_binop (compute_mode
, xor_optab
, op0
, t1
,
4375 NULL_RTX
, 0, OPTAB_WIDEN
);
4376 extra_cost
= (shift_cost
[speed
][compute_mode
][post_shift
]
4377 + shift_cost
[speed
][compute_mode
][size
- 1]
4378 + 2 * add_cost
[speed
][compute_mode
]);
4379 t3
= expand_mult_highpart (compute_mode
, t2
, ml
,
4381 max_cost
- extra_cost
);
4385 (RSHIFT_EXPR
, compute_mode
, t3
,
4386 build_int_cst (NULL_TREE
, post_shift
),
4388 quotient
= expand_binop (compute_mode
, xor_optab
,
4389 t4
, t1
, tquotient
, 0,
4397 rtx nsign
, t1
, t2
, t3
, t4
;
4398 t1
= force_operand (gen_rtx_PLUS (compute_mode
,
4399 op0
, constm1_rtx
), NULL_RTX
);
4400 t2
= expand_binop (compute_mode
, ior_optab
, op0
, t1
, NULL_RTX
,
4402 nsign
= expand_shift
4403 (RSHIFT_EXPR
, compute_mode
, t2
,
4404 build_int_cst (NULL_TREE
, size
- 1),
4406 t3
= force_operand (gen_rtx_MINUS (compute_mode
, t1
, nsign
),
4408 t4
= expand_divmod (0, TRUNC_DIV_EXPR
, compute_mode
, t3
, op1
,
4413 t5
= expand_unop (compute_mode
, one_cmpl_optab
, nsign
,
4415 quotient
= force_operand (gen_rtx_PLUS (compute_mode
,
4424 delete_insns_since (last
);
        /* Try using an instruction that produces both the quotient and
           remainder, using truncation.  We can easily compensate the quotient
           or remainder to get floor rounding, once we have the remainder.
           Notice that we compute also the final remainder value here,
           and return the result right away.  */
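        /* Illustrative sketch (not part of GCC; the helper name and 32-bit
           assumption are mine): floor division/modulus can be recovered from
           the truncating pair with one adjustment, which is what the
           compare-and-jump sequence below emits:

               void floor_divmod_sketch (int x, int y, int *q, int *r)
               {
                 *q = x / y;              // C99 truncating division
                 *r = x % y;
                 // truncation and floor differ only when the remainder is
                 // nonzero and the operands have opposite signs
                 if (*r != 0 && ((x < 0) != (y < 0)))
                   {
                     *q -= 1;
                     *r += y;
                   }
               }
        */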
4431 if (target
== 0 || GET_MODE (target
) != compute_mode
)
4432 target
= gen_reg_rtx (compute_mode
);
4437 = REG_P (target
) ? target
: gen_reg_rtx (compute_mode
);
4438 quotient
= gen_reg_rtx (compute_mode
);
4443 = REG_P (target
) ? target
: gen_reg_rtx (compute_mode
);
4444 remainder
= gen_reg_rtx (compute_mode
);
4447 if (expand_twoval_binop (sdivmod_optab
, op0
, op1
,
4448 quotient
, remainder
, 0))
4450 /* This could be computed with a branch-less sequence.
4451 Save that for later. */
4453 rtx label
= gen_label_rtx ();
4454 do_cmp_and_jump (remainder
, const0_rtx
, EQ
, compute_mode
, label
);
4455 tem
= expand_binop (compute_mode
, xor_optab
, op0
, op1
,
4456 NULL_RTX
, 0, OPTAB_WIDEN
);
4457 do_cmp_and_jump (tem
, const0_rtx
, GE
, compute_mode
, label
);
4458 expand_dec (quotient
, const1_rtx
);
4459 expand_inc (remainder
, op1
);
4461 return gen_lowpart (mode
, rem_flag
? remainder
: quotient
);
4464 /* No luck with division elimination or divmod. Have to do it
4465 by conditionally adjusting op0 *and* the result. */
4467 rtx label1
, label2
, label3
, label4
, label5
;
4471 quotient
= gen_reg_rtx (compute_mode
);
4472 adjusted_op0
= copy_to_mode_reg (compute_mode
, op0
);
4473 label1
= gen_label_rtx ();
4474 label2
= gen_label_rtx ();
4475 label3
= gen_label_rtx ();
4476 label4
= gen_label_rtx ();
4477 label5
= gen_label_rtx ();
4478 do_cmp_and_jump (op1
, const0_rtx
, LT
, compute_mode
, label2
);
4479 do_cmp_and_jump (adjusted_op0
, const0_rtx
, LT
, compute_mode
, label1
);
4480 tem
= expand_binop (compute_mode
, sdiv_optab
, adjusted_op0
, op1
,
4481 quotient
, 0, OPTAB_LIB_WIDEN
);
4482 if (tem
!= quotient
)
4483 emit_move_insn (quotient
, tem
);
4484 emit_jump_insn (gen_jump (label5
));
4486 emit_label (label1
);
4487 expand_inc (adjusted_op0
, const1_rtx
);
4488 emit_jump_insn (gen_jump (label4
));
4490 emit_label (label2
);
4491 do_cmp_and_jump (adjusted_op0
, const0_rtx
, GT
, compute_mode
, label3
);
4492 tem
= expand_binop (compute_mode
, sdiv_optab
, adjusted_op0
, op1
,
4493 quotient
, 0, OPTAB_LIB_WIDEN
);
4494 if (tem
!= quotient
)
4495 emit_move_insn (quotient
, tem
);
4496 emit_jump_insn (gen_jump (label5
));
4498 emit_label (label3
);
4499 expand_dec (adjusted_op0
, const1_rtx
);
4500 emit_label (label4
);
4501 tem
= expand_binop (compute_mode
, sdiv_optab
, adjusted_op0
, op1
,
4502 quotient
, 0, OPTAB_LIB_WIDEN
);
4503 if (tem
!= quotient
)
4504 emit_move_insn (quotient
, tem
);
4505 expand_dec (quotient
, const1_rtx
);
4506 emit_label (label5
);
4514 if (op1_is_constant
&& EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1
)))
4517 unsigned HOST_WIDE_INT d
= INTVAL (op1
);
4518 t1
= expand_shift (RSHIFT_EXPR
, compute_mode
, op0
,
4519 build_int_cst (NULL_TREE
, floor_log2 (d
)),
4521 t2
= expand_binop (compute_mode
, and_optab
, op0
,
4523 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4524 t3
= gen_reg_rtx (compute_mode
);
4525 t3
= emit_store_flag (t3
, NE
, t2
, const0_rtx
,
4526 compute_mode
, 1, 1);
4530 lab
= gen_label_rtx ();
4531 do_cmp_and_jump (t2
, const0_rtx
, EQ
, compute_mode
, lab
);
4532 expand_inc (t1
, const1_rtx
);
4537 quotient
= force_operand (gen_rtx_PLUS (compute_mode
,
        /* Try using an instruction that produces both the quotient and
           remainder, using truncation.  We can easily compensate the
           quotient or remainder to get ceiling rounding, once we have the
           remainder.  Notice that we compute also the final remainder
           value here, and return the result right away.  */
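        /* Illustrative sketch (not part of GCC; the helper name and 32-bit
           assumption are mine): unsigned ceiling division/modulus from the
           truncating pair needs only a test of the remainder, matching the
           increment/decrement pair emitted below:

               void ceil_udivmod_sketch (unsigned int x, unsigned int y,
                                         unsigned int *q, unsigned int *r)
               {
                 *q = x / y;
                 *r = x % y;
                 if (*r != 0)
                   {
                     *q += 1;    // round the quotient up
                     *r -= y;    // remainder becomes r - y
                   }
               }
        */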
4548 if (target
== 0 || GET_MODE (target
) != compute_mode
)
4549 target
= gen_reg_rtx (compute_mode
);
4553 remainder
= (REG_P (target
)
4554 ? target
: gen_reg_rtx (compute_mode
));
4555 quotient
= gen_reg_rtx (compute_mode
);
4559 quotient
= (REG_P (target
)
4560 ? target
: gen_reg_rtx (compute_mode
));
4561 remainder
= gen_reg_rtx (compute_mode
);
4564 if (expand_twoval_binop (udivmod_optab
, op0
, op1
, quotient
,
4567 /* This could be computed with a branch-less sequence.
4568 Save that for later. */
4569 rtx label
= gen_label_rtx ();
4570 do_cmp_and_jump (remainder
, const0_rtx
, EQ
,
4571 compute_mode
, label
);
4572 expand_inc (quotient
, const1_rtx
);
4573 expand_dec (remainder
, op1
);
4575 return gen_lowpart (mode
, rem_flag
? remainder
: quotient
);
4578 /* No luck with division elimination or divmod. Have to do it
4579 by conditionally adjusting op0 *and* the result. */
4582 rtx adjusted_op0
, tem
;
4584 quotient
= gen_reg_rtx (compute_mode
);
4585 adjusted_op0
= copy_to_mode_reg (compute_mode
, op0
);
4586 label1
= gen_label_rtx ();
4587 label2
= gen_label_rtx ();
4588 do_cmp_and_jump (adjusted_op0
, const0_rtx
, NE
,
4589 compute_mode
, label1
);
4590 emit_move_insn (quotient
, const0_rtx
);
4591 emit_jump_insn (gen_jump (label2
));
4593 emit_label (label1
);
4594 expand_dec (adjusted_op0
, const1_rtx
);
4595 tem
= expand_binop (compute_mode
, udiv_optab
, adjusted_op0
, op1
,
4596 quotient
, 1, OPTAB_LIB_WIDEN
);
4597 if (tem
!= quotient
)
4598 emit_move_insn (quotient
, tem
);
4599 expand_inc (quotient
, const1_rtx
);
4600 emit_label (label2
);
4605 if (op1_is_constant
&& EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1
))
4606 && INTVAL (op1
) >= 0)
          /* This is extremely similar to the code for the unsigned case
             above.  For 2.7 we should merge these variants, but for
             2.6.1 I don't want to touch the code for unsigned since that
             gets used in C.  The signed case will only be used by other
             languages (Ada).  */
4615 unsigned HOST_WIDE_INT d
= INTVAL (op1
);
4616 t1
= expand_shift (RSHIFT_EXPR
, compute_mode
, op0
,
4617 build_int_cst (NULL_TREE
, floor_log2 (d
)),
4619 t2
= expand_binop (compute_mode
, and_optab
, op0
,
4621 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4622 t3
= gen_reg_rtx (compute_mode
);
4623 t3
= emit_store_flag (t3
, NE
, t2
, const0_rtx
,
4624 compute_mode
, 1, 1);
4628 lab
= gen_label_rtx ();
4629 do_cmp_and_jump (t2
, const0_rtx
, EQ
, compute_mode
, lab
);
4630 expand_inc (t1
, const1_rtx
);
4635 quotient
= force_operand (gen_rtx_PLUS (compute_mode
,
4641 /* Try using an instruction that produces both the quotient and
4642 remainder, using truncation. We can easily compensate the
4643 quotient or remainder to get ceiling rounding, once we have the
4644 remainder. Notice that we compute also the final remainder
4645 value here, and return the result right away. */
4646 if (target
== 0 || GET_MODE (target
) != compute_mode
)
4647 target
= gen_reg_rtx (compute_mode
);
4650 remainder
= (REG_P (target
)
4651 ? target
: gen_reg_rtx (compute_mode
));
4652 quotient
= gen_reg_rtx (compute_mode
);
4656 quotient
= (REG_P (target
)
4657 ? target
: gen_reg_rtx (compute_mode
));
4658 remainder
= gen_reg_rtx (compute_mode
);
4661 if (expand_twoval_binop (sdivmod_optab
, op0
, op1
, quotient
,
4664 /* This could be computed with a branch-less sequence.
4665 Save that for later. */
4667 rtx label
= gen_label_rtx ();
4668 do_cmp_and_jump (remainder
, const0_rtx
, EQ
,
4669 compute_mode
, label
);
4670 tem
= expand_binop (compute_mode
, xor_optab
, op0
, op1
,
4671 NULL_RTX
, 0, OPTAB_WIDEN
);
4672 do_cmp_and_jump (tem
, const0_rtx
, LT
, compute_mode
, label
);
4673 expand_inc (quotient
, const1_rtx
);
4674 expand_dec (remainder
, op1
);
4676 return gen_lowpart (mode
, rem_flag
? remainder
: quotient
);
4679 /* No luck with division elimination or divmod. Have to do it
4680 by conditionally adjusting op0 *and* the result. */
4682 rtx label1
, label2
, label3
, label4
, label5
;
4686 quotient
= gen_reg_rtx (compute_mode
);
4687 adjusted_op0
= copy_to_mode_reg (compute_mode
, op0
);
4688 label1
= gen_label_rtx ();
4689 label2
= gen_label_rtx ();
4690 label3
= gen_label_rtx ();
4691 label4
= gen_label_rtx ();
4692 label5
= gen_label_rtx ();
4693 do_cmp_and_jump (op1
, const0_rtx
, LT
, compute_mode
, label2
);
4694 do_cmp_and_jump (adjusted_op0
, const0_rtx
, GT
,
4695 compute_mode
, label1
);
4696 tem
= expand_binop (compute_mode
, sdiv_optab
, adjusted_op0
, op1
,
4697 quotient
, 0, OPTAB_LIB_WIDEN
);
4698 if (tem
!= quotient
)
4699 emit_move_insn (quotient
, tem
);
4700 emit_jump_insn (gen_jump (label5
));
4702 emit_label (label1
);
4703 expand_dec (adjusted_op0
, const1_rtx
);
4704 emit_jump_insn (gen_jump (label4
));
4706 emit_label (label2
);
4707 do_cmp_and_jump (adjusted_op0
, const0_rtx
, LT
,
4708 compute_mode
, label3
);
4709 tem
= expand_binop (compute_mode
, sdiv_optab
, adjusted_op0
, op1
,
4710 quotient
, 0, OPTAB_LIB_WIDEN
);
4711 if (tem
!= quotient
)
4712 emit_move_insn (quotient
, tem
);
4713 emit_jump_insn (gen_jump (label5
));
4715 emit_label (label3
);
4716 expand_inc (adjusted_op0
, const1_rtx
);
4717 emit_label (label4
);
4718 tem
= expand_binop (compute_mode
, sdiv_optab
, adjusted_op0
, op1
,
4719 quotient
, 0, OPTAB_LIB_WIDEN
);
4720 if (tem
!= quotient
)
4721 emit_move_insn (quotient
, tem
);
4722 expand_inc (quotient
, const1_rtx
);
4723 emit_label (label5
);
4728 case EXACT_DIV_EXPR
:
4729 if (op1_is_constant
&& HOST_BITS_PER_WIDE_INT
>= size
)
4731 HOST_WIDE_INT d
= INTVAL (op1
);
4732 unsigned HOST_WIDE_INT ml
;
            pre_shift = floor_log2 (d & -d);
            ml = invert_mod2n (d >> pre_shift, size);
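            /* Illustrative sketch (not part of GCC; helper names and the
               32-bit assumption are mine): when the division is known to be
               exact, dividing by an odd D is the same as multiplying by the
               multiplicative inverse of D modulo 2^32, which is what
               invert_mod2n supplies for SIZE bits; even divisors are first
               reduced by the PRE_SHIFT computed above.

                   unsigned int inv_mod2_32_sketch (unsigned int d)  // d odd
                   {
                     unsigned int inv = d;    // correct to 3 bits
                     inv *= 2 - d * inv;      // Newton step: 3 -> 6 bits
                     inv *= 2 - d * inv;      // 6 -> 12 bits
                     inv *= 2 - d * inv;      // 12 -> 24 bits
                     inv *= 2 - d * inv;      // 24 -> 48 >= 32 bits
                     return inv;
                   }

                   unsigned int exact_udiv_sketch (unsigned int x,
                                                   unsigned int d)
                   {
                     // valid only when d is odd and divides x exactly
                     return x * inv_mod2_32_sketch (d);
                   }
            */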
4738 t1
= expand_shift (RSHIFT_EXPR
, compute_mode
, op0
,
4739 build_int_cst (NULL_TREE
, pre_shift
),
4740 NULL_RTX
, unsignedp
);
4741 quotient
= expand_mult (compute_mode
, t1
,
4742 gen_int_mode (ml
, compute_mode
),
4745 insn
= get_last_insn ();
4746 set_unique_reg_note (insn
,
4748 gen_rtx_fmt_ee (unsignedp
? UDIV
: DIV
,
4754 case ROUND_DIV_EXPR
:
4755 case ROUND_MOD_EXPR
:
4760 label
= gen_label_rtx ();
4761 quotient
= gen_reg_rtx (compute_mode
);
4762 remainder
= gen_reg_rtx (compute_mode
);
4763 if (expand_twoval_binop (udivmod_optab
, op0
, op1
, quotient
, remainder
, 1) == 0)
4766 quotient
= expand_binop (compute_mode
, udiv_optab
, op0
, op1
,
4767 quotient
, 1, OPTAB_LIB_WIDEN
);
4768 tem
= expand_mult (compute_mode
, quotient
, op1
, NULL_RTX
, 1);
4769 remainder
= expand_binop (compute_mode
, sub_optab
, op0
, tem
,
4770 remainder
, 1, OPTAB_LIB_WIDEN
);
4772 tem
= plus_constant (op1
, -1);
4773 tem
= expand_shift (RSHIFT_EXPR
, compute_mode
, tem
,
4774 integer_one_node
, NULL_RTX
, 1);
4775 do_cmp_and_jump (remainder
, tem
, LEU
, compute_mode
, label
);
4776 expand_inc (quotient
, const1_rtx
);
4777 expand_dec (remainder
, op1
);
4782 rtx abs_rem
, abs_op1
, tem
, mask
;
4784 label
= gen_label_rtx ();
4785 quotient
= gen_reg_rtx (compute_mode
);
4786 remainder
= gen_reg_rtx (compute_mode
);
4787 if (expand_twoval_binop (sdivmod_optab
, op0
, op1
, quotient
, remainder
, 0) == 0)
4790 quotient
= expand_binop (compute_mode
, sdiv_optab
, op0
, op1
,
4791 quotient
, 0, OPTAB_LIB_WIDEN
);
4792 tem
= expand_mult (compute_mode
, quotient
, op1
, NULL_RTX
, 0);
4793 remainder
= expand_binop (compute_mode
, sub_optab
, op0
, tem
,
4794 remainder
, 0, OPTAB_LIB_WIDEN
);
4796 abs_rem
= expand_abs (compute_mode
, remainder
, NULL_RTX
, 1, 0);
4797 abs_op1
= expand_abs (compute_mode
, op1
, NULL_RTX
, 1, 0);
4798 tem
= expand_shift (LSHIFT_EXPR
, compute_mode
, abs_rem
,
4799 integer_one_node
, NULL_RTX
, 1);
4800 do_cmp_and_jump (tem
, abs_op1
, LTU
, compute_mode
, label
);
4801 tem
= expand_binop (compute_mode
, xor_optab
, op0
, op1
,
4802 NULL_RTX
, 0, OPTAB_WIDEN
);
4803 mask
= expand_shift (RSHIFT_EXPR
, compute_mode
, tem
,
4804 build_int_cst (NULL_TREE
, size
- 1),
4806 tem
= expand_binop (compute_mode
, xor_optab
, mask
, const1_rtx
,
4807 NULL_RTX
, 0, OPTAB_WIDEN
);
4808 tem
= expand_binop (compute_mode
, sub_optab
, tem
, mask
,
4809 NULL_RTX
, 0, OPTAB_WIDEN
);
4810 expand_inc (quotient
, tem
);
4811 tem
= expand_binop (compute_mode
, xor_optab
, mask
, op1
,
4812 NULL_RTX
, 0, OPTAB_WIDEN
);
4813 tem
= expand_binop (compute_mode
, sub_optab
, tem
, mask
,
4814 NULL_RTX
, 0, OPTAB_WIDEN
);
4815 expand_dec (remainder
, tem
);
4818 return gen_lowpart (mode
, rem_flag
? remainder
: quotient
);
4826 if (target
&& GET_MODE (target
) != compute_mode
)
4831 /* Try to produce the remainder without producing the quotient.
4832 If we seem to have a divmod pattern that does not require widening,
4833 don't try widening here. We should really have a WIDEN argument
4834 to expand_twoval_binop, since what we'd really like to do here is
4835 1) try a mod insn in compute_mode
4836 2) try a divmod insn in compute_mode
4837 3) try a div insn in compute_mode and multiply-subtract to get
4839 4) try the same things with widening allowed. */
4841 = sign_expand_binop (compute_mode
, umod_optab
, smod_optab
,
4844 ((optab_handler (optab2
, compute_mode
)
4845 != CODE_FOR_nothing
)
4846 ? OPTAB_DIRECT
: OPTAB_WIDEN
));
4849 /* No luck there. Can we do remainder and divide at once
4850 without a library call? */
4851 remainder
= gen_reg_rtx (compute_mode
);
4852 if (! expand_twoval_binop ((unsignedp
4856 NULL_RTX
, remainder
, unsignedp
))
4861 return gen_lowpart (mode
, remainder
);
4864 /* Produce the quotient. Try a quotient insn, but not a library call.
4865 If we have a divmod in this mode, use it in preference to widening
4866 the div (for this test we assume it will not fail). Note that optab2
4867 is set to the one of the two optabs that the call below will use. */
4869 = sign_expand_binop (compute_mode
, udiv_optab
, sdiv_optab
,
4870 op0
, op1
, rem_flag
? NULL_RTX
: target
,
4872 ((optab_handler (optab2
, compute_mode
)
4873 != CODE_FOR_nothing
)
4874 ? OPTAB_DIRECT
: OPTAB_WIDEN
));
4878 /* No luck there. Try a quotient-and-remainder insn,
4879 keeping the quotient alone. */
4880 quotient
= gen_reg_rtx (compute_mode
);
4881 if (! expand_twoval_binop (unsignedp
? udivmod_optab
: sdivmod_optab
,
4883 quotient
, NULL_RTX
, unsignedp
))
4887 /* Still no luck. If we are not computing the remainder,
4888 use a library call for the quotient. */
4889 quotient
= sign_expand_binop (compute_mode
,
4890 udiv_optab
, sdiv_optab
,
4892 unsignedp
, OPTAB_LIB_WIDEN
);
4899 if (target
&& GET_MODE (target
) != compute_mode
)
4904 /* No divide instruction either. Use library for remainder. */
4905 remainder
= sign_expand_binop (compute_mode
, umod_optab
, smod_optab
,
4907 unsignedp
, OPTAB_LIB_WIDEN
);
4908 /* No remainder function. Try a quotient-and-remainder
4909 function, keeping the remainder. */
4912 remainder
= gen_reg_rtx (compute_mode
);
4913 if (!expand_twoval_binop_libfunc
4914 (unsignedp
? udivmod_optab
: sdivmod_optab
,
4916 NULL_RTX
, remainder
,
4917 unsignedp
? UMOD
: MOD
))
4918 remainder
= NULL_RTX
;
4923 /* We divided. Now finish doing X - Y * (X / Y). */
4924 remainder
= expand_mult (compute_mode
, quotient
, op1
,
4925 NULL_RTX
, unsignedp
);
4926 remainder
= expand_binop (compute_mode
, sub_optab
, op0
,
4927 remainder
, target
, unsignedp
,
4932 return gen_lowpart (mode
, rem_flag
? remainder
: quotient
);
/* Return a tree node with data type TYPE, describing the value of X.
   Usually this is a VAR_DECL, if there is no obvious better choice.
   X may be an expression, however we only support those expressions
   generated by loop.c.  */
4941 make_tree (tree type
, rtx x
)
4945 switch (GET_CODE (x
))
4949 HOST_WIDE_INT hi
= 0;
4952 && !(TYPE_UNSIGNED (type
)
4953 && (GET_MODE_BITSIZE (TYPE_MODE (type
))
4954 < HOST_BITS_PER_WIDE_INT
)))
4957 t
= build_int_cst_wide (type
, INTVAL (x
), hi
);
4963 if (GET_MODE (x
) == VOIDmode
)
4964 t
= build_int_cst_wide (type
,
4965 CONST_DOUBLE_LOW (x
), CONST_DOUBLE_HIGH (x
));
4970 REAL_VALUE_FROM_CONST_DOUBLE (d
, x
);
4971 t
= build_real (type
, d
);
4978 int units
= CONST_VECTOR_NUNITS (x
);
4979 tree itype
= TREE_TYPE (type
);
4984 /* Build a tree with vector elements. */
4985 for (i
= units
- 1; i
>= 0; --i
)
4987 rtx elt
= CONST_VECTOR_ELT (x
, i
);
4988 t
= tree_cons (NULL_TREE
, make_tree (itype
, elt
), t
);
4991 return build_vector (type
, t
);
4995 return fold_build2 (PLUS_EXPR
, type
, make_tree (type
, XEXP (x
, 0)),
4996 make_tree (type
, XEXP (x
, 1)));
4999 return fold_build2 (MINUS_EXPR
, type
, make_tree (type
, XEXP (x
, 0)),
5000 make_tree (type
, XEXP (x
, 1)));
5003 return fold_build1 (NEGATE_EXPR
, type
, make_tree (type
, XEXP (x
, 0)));
5006 return fold_build2 (MULT_EXPR
, type
, make_tree (type
, XEXP (x
, 0)),
5007 make_tree (type
, XEXP (x
, 1)));
5010 return fold_build2 (LSHIFT_EXPR
, type
, make_tree (type
, XEXP (x
, 0)),
5011 make_tree (type
, XEXP (x
, 1)));
5014 t
= unsigned_type_for (type
);
5015 return fold_convert (type
, build2 (RSHIFT_EXPR
, t
,
5016 make_tree (t
, XEXP (x
, 0)),
5017 make_tree (type
, XEXP (x
, 1))));
5020 t
= signed_type_for (type
);
5021 return fold_convert (type
, build2 (RSHIFT_EXPR
, t
,
5022 make_tree (t
, XEXP (x
, 0)),
5023 make_tree (type
, XEXP (x
, 1))));
5026 if (TREE_CODE (type
) != REAL_TYPE
)
5027 t
= signed_type_for (type
);
5031 return fold_convert (type
, build2 (TRUNC_DIV_EXPR
, t
,
5032 make_tree (t
, XEXP (x
, 0)),
5033 make_tree (t
, XEXP (x
, 1))));
5035 t
= unsigned_type_for (type
);
5036 return fold_convert (type
, build2 (TRUNC_DIV_EXPR
, t
,
5037 make_tree (t
, XEXP (x
, 0)),
5038 make_tree (t
, XEXP (x
, 1))));
5042 t
= lang_hooks
.types
.type_for_mode (GET_MODE (XEXP (x
, 0)),
5043 GET_CODE (x
) == ZERO_EXTEND
);
5044 return fold_convert (type
, make_tree (t
, XEXP (x
, 0)));
5047 return make_tree (type
, XEXP (x
, 0));
5050 t
= SYMBOL_REF_DECL (x
);
5052 return fold_convert (type
, build_fold_addr_expr (t
));
5053 /* else fall through. */
5056 t
= build_decl (RTL_LOCATION (x
), VAR_DECL
, NULL_TREE
, type
);
5058 /* If TYPE is a POINTER_TYPE, we might need to convert X from
5059 address mode to pointer mode. */
5060 if (POINTER_TYPE_P (type
))
5061 x
= convert_memory_address_addr_space
5062 (TYPE_MODE (type
), x
, TYPE_ADDR_SPACE (TREE_TYPE (type
)));
5064 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5065 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5066 t
->decl_with_rtl
.rtl
= x
;
5072 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5073 and returning TARGET.
5075 If TARGET is 0, a pseudo-register or constant is returned. */
5078 expand_and (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
)
5082 if (GET_MODE (op0
) == VOIDmode
&& GET_MODE (op1
) == VOIDmode
)
5083 tem
= simplify_binary_operation (AND
, mode
, op0
, op1
);
5085 tem
= expand_binop (mode
, and_optab
, op0
, op1
, target
, 0, OPTAB_LIB_WIDEN
);
5089 else if (tem
!= target
)
5090 emit_move_insn (target
, tem
);
5094 /* Helper function for emit_store_flag. */
5096 emit_cstore (rtx target
, enum insn_code icode
, enum rtx_code code
,
5097 enum machine_mode mode
, enum machine_mode compare_mode
,
5098 int unsignedp
, rtx x
, rtx y
, int normalizep
,
5099 enum machine_mode target_mode
)
5101 rtx op0
, last
, comparison
, subtarget
, pattern
;
5102 enum machine_mode result_mode
= insn_data
[(int) icode
].operand
[0].mode
;
5104 last
= get_last_insn ();
5105 x
= prepare_operand (icode
, x
, 2, mode
, compare_mode
, unsignedp
);
5106 y
= prepare_operand (icode
, y
, 3, mode
, compare_mode
, unsignedp
);
5107 comparison
= gen_rtx_fmt_ee (code
, result_mode
, x
, y
);
5109 || !insn_data
[icode
].operand
[2].predicate
5110 (x
, insn_data
[icode
].operand
[2].mode
)
5111 || !insn_data
[icode
].operand
[3].predicate
5112 (y
, insn_data
[icode
].operand
[3].mode
)
5113 || !insn_data
[icode
].operand
[1].predicate (comparison
, VOIDmode
))
5115 delete_insns_since (last
);
5119 if (target_mode
== VOIDmode
)
5120 target_mode
= result_mode
;
5122 target
= gen_reg_rtx (target_mode
);
5125 || !(insn_data
[(int) icode
].operand
[0].predicate (target
, result_mode
)))
5126 subtarget
= gen_reg_rtx (result_mode
);
5130 pattern
= GEN_FCN (icode
) (subtarget
, comparison
, x
, y
);
5133 emit_insn (pattern
);
5135 /* If we are converting to a wider mode, first convert to
5136 TARGET_MODE, then normalize. This produces better combining
5137 opportunities on machines that have a SIGN_EXTRACT when we are
5138 testing a single bit. This mostly benefits the 68k.
5140 If STORE_FLAG_VALUE does not have the sign bit set when
5141 interpreted in MODE, we can do this conversion as unsigned, which
5142 is usually more efficient. */
5143 if (GET_MODE_SIZE (target_mode
) > GET_MODE_SIZE (result_mode
))
5145 convert_move (target
, subtarget
,
5146 (GET_MODE_BITSIZE (result_mode
) <= HOST_BITS_PER_WIDE_INT
)
5147 && 0 == (STORE_FLAG_VALUE
5148 & ((HOST_WIDE_INT
) 1
5149 << (GET_MODE_BITSIZE (result_mode
) -1))));
5151 result_mode
= target_mode
;
5156 /* If we want to keep subexpressions around, don't reuse our last
5161 /* Now normalize to the proper value in MODE. Sometimes we don't
5162 have to do anything. */
5163 if (normalizep
== 0 || normalizep
== STORE_FLAG_VALUE
)
5165 /* STORE_FLAG_VALUE might be the most negative number, so write
5166 the comparison this way to avoid a compiler-time warning. */
5167 else if (- normalizep
== STORE_FLAG_VALUE
)
5168 op0
= expand_unop (result_mode
, neg_optab
, op0
, subtarget
, 0);
5170 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5171 it hard to use a value of just the sign bit due to ANSI integer
5172 constant typing rules. */
5173 else if (GET_MODE_BITSIZE (result_mode
) <= HOST_BITS_PER_WIDE_INT
5174 && (STORE_FLAG_VALUE
5175 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (result_mode
) - 1))))
5176 op0
= expand_shift (RSHIFT_EXPR
, result_mode
, op0
,
5177 size_int (GET_MODE_BITSIZE (result_mode
) - 1), subtarget
,
5181 gcc_assert (STORE_FLAG_VALUE
& 1);
5183 op0
= expand_and (result_mode
, op0
, const1_rtx
, subtarget
);
5184 if (normalizep
== -1)
5185 op0
= expand_unop (result_mode
, neg_optab
, op0
, op0
, 0);
5188 /* If we were converting to a smaller mode, do the conversion now. */
5189 if (target_mode
!= result_mode
)
5191 convert_move (target
, op0
, 0);
5199 /* A subroutine of emit_store_flag only including "tricks" that do not
5200 need a recursive call. These are kept separate to avoid infinite
5204 emit_store_flag_1 (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
5205 enum machine_mode mode
, int unsignedp
, int normalizep
,
5206 enum machine_mode target_mode
)
5209 enum insn_code icode
;
5210 enum machine_mode compare_mode
;
5211 enum mode_class mclass
;
5212 enum rtx_code scode
;
5216 code
= unsigned_condition (code
);
5217 scode
= swap_condition (code
);
5219 /* If one operand is constant, make it the second one. Only do this
5220 if the other operand is not constant as well. */
5222 if (swap_commutative_operands_p (op0
, op1
))
5227 code
= swap_condition (code
);
5230 if (mode
== VOIDmode
)
5231 mode
= GET_MODE (op0
);
5233 /* For some comparisons with 1 and -1, we can convert this to
5234 comparisons with zero. This will often produce more opportunities for
5235 store-flag insns. */
5240 if (op1
== const1_rtx
)
5241 op1
= const0_rtx
, code
= LE
;
5244 if (op1
== constm1_rtx
)
5245 op1
= const0_rtx
, code
= LT
;
5248 if (op1
== const1_rtx
)
5249 op1
= const0_rtx
, code
= GT
;
5252 if (op1
== constm1_rtx
)
5253 op1
= const0_rtx
, code
= GE
;
5256 if (op1
== const1_rtx
)
5257 op1
= const0_rtx
, code
= NE
;
5260 if (op1
== const1_rtx
)
5261 op1
= const0_rtx
, code
= EQ
;
  /* If we are comparing a double-word integer with zero or -1, we can
     convert the comparison into one involving a single word.  */
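  /* Illustrative sketch (not part of GCC; helper names and the 32-bit word
     size are mine): splitting a 64-bit value into an unsigned low word LO
     and a signed high word HI, no double-word compare is needed:

         int dw_eq_zero_sketch (unsigned int lo, int hi)
         {
           return ((unsigned int) hi | lo) == 0;   // EQ/NE against 0
         }

         int dw_lt_zero_sketch (int hi)
         {
           return hi < 0;                  // LT/GE only look at the sign word
         }

     Comparing against -1 uses AND instead of IOR, exactly as the
     single-word subreg code below does.  */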
5269 if (GET_MODE_BITSIZE (mode
) == BITS_PER_WORD
* 2
5270 && GET_MODE_CLASS (mode
) == MODE_INT
5271 && (!MEM_P (op0
) || ! MEM_VOLATILE_P (op0
)))
5273 if ((code
== EQ
|| code
== NE
)
5274 && (op1
== const0_rtx
|| op1
== constm1_rtx
))
5278 /* Do a logical OR or AND of the two words and compare the
5280 op00
= simplify_gen_subreg (word_mode
, op0
, mode
, 0);
5281 op01
= simplify_gen_subreg (word_mode
, op0
, mode
, UNITS_PER_WORD
);
5282 tem
= expand_binop (word_mode
,
5283 op1
== const0_rtx
? ior_optab
: and_optab
,
5284 op00
, op01
, NULL_RTX
, unsignedp
,
5288 tem
= emit_store_flag (NULL_RTX
, code
, tem
, op1
, word_mode
,
5289 unsignedp
, normalizep
);
5291 else if ((code
== LT
|| code
== GE
) && op1
== const0_rtx
)
5295 /* If testing the sign bit, can just test on high word. */
5296 op0h
= simplify_gen_subreg (word_mode
, op0
, mode
,
5297 subreg_highpart_offset (word_mode
,
5299 tem
= emit_store_flag (NULL_RTX
, code
, op0h
, op1
, word_mode
,
5300 unsignedp
, normalizep
);
5307 if (target_mode
== VOIDmode
|| GET_MODE (tem
) == target_mode
)
5310 target
= gen_reg_rtx (target_mode
);
5312 convert_move (target
, tem
,
5313 0 == ((normalizep
? normalizep
: STORE_FLAG_VALUE
)
5314 & ((HOST_WIDE_INT
) 1
5315 << (GET_MODE_BITSIZE (word_mode
) -1))));
  /* If this is A < 0 or A >= 0, we can do this by taking the ones
     complement of A (for GE) and shifting the sign bit to the low bit.  */
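  /* Illustrative sketch (not part of GCC; helper names and the 32-bit
     assumption are mine): with a logical shift that brings the sign bit
     down to bit 0,

         int lt_zero_sketch (int a)   // A < 0  ->  1/0
         {
           return (int) ((unsigned int) a >> 31);
         }

         int ge_zero_sketch (int a)   // A >= 0 ->  1/0
         {
           return (int) ((unsigned int) ~a >> 31);
         }

     and an arithmetic shift instead of the logical one yields the -1/0
     normalization.  */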
5322 if (op1
== const0_rtx
&& (code
== LT
|| code
== GE
)
5323 && GET_MODE_CLASS (mode
) == MODE_INT
5324 && (normalizep
|| STORE_FLAG_VALUE
== 1
5325 || (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
5326 && ((STORE_FLAG_VALUE
& GET_MODE_MASK (mode
))
5327 == ((unsigned HOST_WIDE_INT
) 1
5328 << (GET_MODE_BITSIZE (mode
) - 1))))))
5335 /* If the result is to be wider than OP0, it is best to convert it
5336 first. If it is to be narrower, it is *incorrect* to convert it
5338 else if (GET_MODE_SIZE (target_mode
) > GET_MODE_SIZE (mode
))
5340 op0
= convert_modes (target_mode
, mode
, op0
, 0);
5344 if (target_mode
!= mode
)
5348 op0
= expand_unop (mode
, one_cmpl_optab
, op0
,
5349 ((STORE_FLAG_VALUE
== 1 || normalizep
)
5350 ? 0 : subtarget
), 0);
5352 if (STORE_FLAG_VALUE
== 1 || normalizep
)
5353 /* If we are supposed to produce a 0/1 value, we want to do
5354 a logical shift from the sign bit to the low-order bit; for
5355 a -1/0 value, we do an arithmetic shift. */
5356 op0
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
5357 size_int (GET_MODE_BITSIZE (mode
) - 1),
5358 subtarget
, normalizep
!= -1);
5360 if (mode
!= target_mode
)
5361 op0
= convert_modes (target_mode
, mode
, op0
, 0);
5366 mclass
= GET_MODE_CLASS (mode
);
5367 for (compare_mode
= mode
; compare_mode
!= VOIDmode
;
5368 compare_mode
= GET_MODE_WIDER_MODE (compare_mode
))
5370 enum machine_mode optab_mode
= mclass
== MODE_CC
? CCmode
: compare_mode
;
5371 icode
= optab_handler (cstore_optab
, optab_mode
);
5372 if (icode
!= CODE_FOR_nothing
)
5374 do_pending_stack_adjust ();
5375 tem
= emit_cstore (target
, icode
, code
, mode
, compare_mode
,
5376 unsignedp
, op0
, op1
, normalizep
, target_mode
);
5380 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5382 tem
= emit_cstore (target
, icode
, scode
, mode
, compare_mode
,
5383 unsignedp
, op1
, op0
, normalizep
, target_mode
);
5394 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5395 and storing in TARGET. Normally return TARGET.
5396 Return 0 if that cannot be done.
5398 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5399 it is VOIDmode, they cannot both be CONST_INT.
5401 UNSIGNEDP is for the case where we have to widen the operands
5402 to perform the operation. It says to use zero-extension.
5404 NORMALIZEP is 1 if we should convert the result to be either zero
5405 or one. Normalize is -1 if we should convert the result to be
5406 either zero or -1. If NORMALIZEP is zero, the result will be left
5407 "raw" out of the scc insn. */
5410 emit_store_flag (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
5411 enum machine_mode mode
, int unsignedp
, int normalizep
)
5413 enum machine_mode target_mode
= target
? GET_MODE (target
) : VOIDmode
;
5414 enum rtx_code rcode
;
5416 rtx tem
, last
, trueval
;
5418 tem
= emit_store_flag_1 (target
, code
, op0
, op1
, mode
, unsignedp
, normalizep
,
5423 /* If we reached here, we can't do this with a scc insn, however there
5424 are some comparisons that can be done in other ways. Don't do any
5425 of these cases if branches are very cheap. */
5426 if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
5429 /* See what we need to return. We can only return a 1, -1, or the
5432 if (normalizep
== 0)
5434 if (STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
5435 normalizep
= STORE_FLAG_VALUE
;
5437 else if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
5438 && ((STORE_FLAG_VALUE
& GET_MODE_MASK (mode
))
5439 == (unsigned HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (mode
) - 1)))
5445 last
= get_last_insn ();
5447 /* If optimizing, use different pseudo registers for each insn, instead
5448 of reusing the same pseudo. This leads to better CSE, but slows
5449 down the compiler, since there are more pseudos */
5450 subtarget
= (!optimize
5451 && (target_mode
== mode
)) ? target
: NULL_RTX
;
5452 trueval
= GEN_INT (normalizep
? normalizep
: STORE_FLAG_VALUE
);
5454 /* For floating-point comparisons, try the reverse comparison or try
5455 changing the "orderedness" of the comparison. */
5456 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5458 enum rtx_code first_code
;
5461 rcode
= reverse_condition_maybe_unordered (code
);
5462 if (can_compare_p (rcode
, mode
, ccp_store_flag
)
5463 && (code
== ORDERED
|| code
== UNORDERED
5464 || (! HONOR_NANS (mode
) && (code
== LTGT
|| code
== UNEQ
))
5465 || (! HONOR_SNANS (mode
) && (code
== EQ
|| code
== NE
))))
5467 int want_add
= ((STORE_FLAG_VALUE
== 1 && normalizep
== -1)
5468 || (STORE_FLAG_VALUE
== -1 && normalizep
== 1));
5470 /* For the reverse comparison, use either an addition or a XOR. */
5472 && rtx_cost (GEN_INT (normalizep
), PLUS
,
5473 optimize_insn_for_speed_p ()) == 0)
5475 tem
= emit_store_flag_1 (subtarget
, rcode
, op0
, op1
, mode
, 0,
5476 STORE_FLAG_VALUE
, target_mode
);
5478 return expand_binop (target_mode
, add_optab
, tem
,
5479 GEN_INT (normalizep
),
5480 target
, 0, OPTAB_WIDEN
);
5483 && rtx_cost (trueval
, XOR
,
5484 optimize_insn_for_speed_p ()) == 0)
5486 tem
= emit_store_flag_1 (subtarget
, rcode
, op0
, op1
, mode
, 0,
5487 normalizep
, target_mode
);
5489 return expand_binop (target_mode
, xor_optab
, tem
, trueval
,
5490 target
, INTVAL (trueval
) >= 0, OPTAB_WIDEN
);
5494 delete_insns_since (last
);
5496 /* Cannot split ORDERED and UNORDERED, only try the above trick. */
5497 if (code
== ORDERED
|| code
== UNORDERED
)
5500 and_them
= split_comparison (code
, mode
, &first_code
, &code
);
5502 /* If there are no NaNs, the first comparison should always fall through.
5503 Effectively change the comparison to the other one. */
5504 if (!HONOR_NANS (mode
))
5506 gcc_assert (first_code
== (and_them
? ORDERED
: UNORDERED
));
5507 return emit_store_flag_1 (target
, code
, op0
, op1
, mode
, 0, normalizep
,
5511 #ifdef HAVE_conditional_move
5512 /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
5513 conditional move. */
5514 tem
= emit_store_flag_1 (subtarget
, first_code
, op0
, op1
, mode
, 0,
5515 normalizep
, target_mode
);
5520 tem
= emit_conditional_move (target
, code
, op0
, op1
, mode
,
5521 tem
, const0_rtx
, GET_MODE (tem
), 0);
5523 tem
= emit_conditional_move (target
, code
, op0
, op1
, mode
,
5524 trueval
, tem
, GET_MODE (tem
), 0);
5527 delete_insns_since (last
);
5534 /* The remaining tricks only apply to integer comparisons. */
5536 if (GET_MODE_CLASS (mode
) != MODE_INT
)
5539 /* If this is an equality comparison of integers, we can try to exclusive-or
5540 (or subtract) the two operands and use a recursive call to try the
5541 comparison with zero. Don't do any of these cases if branches are
5544 if ((code
== EQ
|| code
== NE
) && op1
!= const0_rtx
)
5546 tem
= expand_binop (mode
, xor_optab
, op0
, op1
, subtarget
, 1,
5550 tem
= expand_binop (mode
, sub_optab
, op0
, op1
, subtarget
, 1,
5553 tem
= emit_store_flag (target
, code
, tem
, const0_rtx
,
5554 mode
, unsignedp
, normalizep
);
5558 delete_insns_since (last
);
5561 /* For integer comparisons, try the reverse comparison. However, for
5562 small X and if we'd have anyway to extend, implementing "X != 0"
5563 as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0". */
5564 rcode
= reverse_condition (code
);
5565 if (can_compare_p (rcode
, mode
, ccp_store_flag
)
5566 && ! (optab_handler (cstore_optab
, mode
) == CODE_FOR_nothing
5568 && GET_MODE_SIZE (mode
) < UNITS_PER_WORD
5569 && op1
== const0_rtx
))
5571 int want_add
= ((STORE_FLAG_VALUE
== 1 && normalizep
== -1)
5572 || (STORE_FLAG_VALUE
== -1 && normalizep
== 1));
5574 /* Again, for the reverse comparison, use either an addition or a XOR. */
5576 && rtx_cost (GEN_INT (normalizep
), PLUS
,
5577 optimize_insn_for_speed_p ()) == 0)
5579 tem
= emit_store_flag_1 (subtarget
, rcode
, op0
, op1
, mode
, 0,
5580 STORE_FLAG_VALUE
, target_mode
);
5582 tem
= expand_binop (target_mode
, add_optab
, tem
,
5583 GEN_INT (normalizep
), target
, 0, OPTAB_WIDEN
);
5586 && rtx_cost (trueval
, XOR
,
5587 optimize_insn_for_speed_p ()) == 0)
5589 tem
= emit_store_flag_1 (subtarget
, rcode
, op0
, op1
, mode
, 0,
5590 normalizep
, target_mode
);
5592 tem
= expand_binop (target_mode
, xor_optab
, tem
, trueval
, target
,
5593 INTVAL (trueval
) >= 0, OPTAB_WIDEN
);
5598 delete_insns_since (last
);
5601 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5602 the constant zero. Reject all other comparisons at this point. Only
5603 do LE and GT if branches are expensive since they are expensive on
5604 2-operand machines. */
5606 if (op1
!= const0_rtx
5607 || (code
!= EQ
&& code
!= NE
5608 && (BRANCH_COST (optimize_insn_for_speed_p (),
5609 false) <= 1 || (code
!= LE
&& code
!= GT
))))
5612 /* Try to put the result of the comparison in the sign bit. Assume we can't
5613 do the necessary operation below. */
  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */
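  /* Illustrative sketch (not part of GCC), assuming 32-bit two's-complement
     wraparound:

         (a | (a - 1)) < 0   iff   a <= 0

     a negative A keeps its sign bit, A == 0 borrows into the sign bit
     through A - 1, and a positive A leaves it clear, so the test reduces
     to extracting the sign bit of the IOR.  */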
5622 /* This is destructive, so SUBTARGET can't be OP0. */
5623 if (rtx_equal_p (subtarget
, op0
))
5626 tem
= expand_binop (mode
, sub_optab
, op0
, const1_rtx
, subtarget
, 0,
5629 tem
= expand_binop (mode
, ior_optab
, op0
, tem
, subtarget
, 0,
  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */
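  /* Illustrative sketch (not part of GCC), assuming a 32-bit int and an
     arithmetic right shift:

         ((a >> 31) - a) < 0   iff   a > 0

     a > 0 gives 0 - a, which is negative; a == 0 gives 0; and a < 0 gives
     -1 - a, which is non-negative for every negative a.  */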
5638 if (rtx_equal_p (subtarget
, op0
))
5641 tem
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
5642 size_int (GET_MODE_BITSIZE (mode
) - 1),
5644 tem
= expand_binop (mode
, sub_optab
, tem
, op0
, subtarget
, 0,
5648 if (code
== EQ
|| code
== NE
)
      /* For EQ or NE, one way to do the comparison is to apply an operation
         that converts the operand into a positive number if it is nonzero
         or zero if it was originally zero.  Then, for EQ, we subtract 1 and
         for NE we negate.  This puts the result in the sign bit.  Then we
         normalize with a shift, if needed.

         Two operations that can do the above actions are ABS and FFS, so try
         them.  If that doesn't work, and MODE is smaller than a full word,
         we can use zero-extension to the wider mode (an unsigned conversion)
         as the operation.  */

      /* Note that ABS doesn't yield a positive number for INT_MIN, but
         that is compensated by the subsequent overflow when subtracting 1.  */
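      /* Illustrative sketch (not part of GCC), assuming 32-bit
         two's-complement wraparound:

             (abs (x) - 1) < 0   iff   x == 0
             (- abs (x)) < 0     iff   x != 0

         abs (x) is positive for every nonzero x except INT_MIN, where the
         wraparound in the subsequent subtraction or negation still leaves
         the sign bit with the right value, so the answer lands in the sign
         bit and is normalized with a final shift.  */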
5665 if (optab_handler (abs_optab
, mode
) != CODE_FOR_nothing
)
5666 tem
= expand_unop (mode
, abs_optab
, op0
, subtarget
, 1);
5667 else if (optab_handler (ffs_optab
, mode
) != CODE_FOR_nothing
)
5668 tem
= expand_unop (mode
, ffs_optab
, op0
, subtarget
, 1);
5669 else if (GET_MODE_SIZE (mode
) < UNITS_PER_WORD
)
5671 tem
= convert_modes (word_mode
, mode
, op0
, 1);
5678 tem
= expand_binop (mode
, sub_optab
, tem
, const1_rtx
, subtarget
,
5681 tem
= expand_unop (mode
, neg_optab
, tem
, subtarget
, 0);
      /* If we couldn't do it that way, for NE we can "or" the two's complement
         of the value with itself.  For EQ, we take the one's complement of
         that "or", which is an extra insn, so we only handle EQ if branches
         are expensive.  */
5691 || BRANCH_COST (optimize_insn_for_speed_p (),
5694 if (rtx_equal_p (subtarget
, op0
))
5697 tem
= expand_unop (mode
, neg_optab
, op0
, subtarget
, 0);
5698 tem
= expand_binop (mode
, ior_optab
, tem
, op0
, subtarget
, 0,
5701 if (tem
&& code
== EQ
)
5702 tem
= expand_unop (mode
, one_cmpl_optab
, tem
, subtarget
, 0);
5706 if (tem
&& normalizep
)
5707 tem
= expand_shift (RSHIFT_EXPR
, mode
, tem
,
5708 size_int (GET_MODE_BITSIZE (mode
) - 1),
5709 subtarget
, normalizep
== 1);
5715 else if (GET_MODE (tem
) != target_mode
)
5717 convert_move (target
, tem
, 0);
5720 else if (!subtarget
)
5722 emit_move_insn (target
, tem
);
5727 delete_insns_since (last
);
5732 /* Like emit_store_flag, but always succeeds. */
5735 emit_store_flag_force (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
5736 enum machine_mode mode
, int unsignedp
, int normalizep
)
5739 rtx trueval
, falseval
;
5741 /* First see if emit_store_flag can do the job. */
5742 tem
= emit_store_flag (target
, code
, op0
, op1
, mode
, unsignedp
, normalizep
);
5747 target
= gen_reg_rtx (word_mode
);
5749 /* If this failed, we have to do this with set/compare/jump/set code.
5750 For foo != 0, if foo is in OP0, just replace it with 1 if nonzero. */
5751 trueval
= normalizep
? GEN_INT (normalizep
) : const1_rtx
;
5753 && GET_MODE_CLASS (mode
) == MODE_INT
5756 && op1
== const0_rtx
)
5758 label
= gen_label_rtx ();
5759 do_compare_rtx_and_jump (target
, const0_rtx
, EQ
, unsignedp
,
5760 mode
, NULL_RTX
, NULL_RTX
, label
, -1);
5761 emit_move_insn (target
, trueval
);
5767 || reg_mentioned_p (target
, op0
) || reg_mentioned_p (target
, op1
))
5768 target
= gen_reg_rtx (GET_MODE (target
));
5770 /* Jump in the right direction if the target cannot implement CODE
5771 but can jump on its reverse condition. */
5772 falseval
= const0_rtx
;
5773 if (! can_compare_p (code
, mode
, ccp_jump
)
5774 && (! FLOAT_MODE_P (mode
)
5775 || code
== ORDERED
|| code
== UNORDERED
5776 || (! HONOR_NANS (mode
) && (code
== LTGT
|| code
== UNEQ
))
5777 || (! HONOR_SNANS (mode
) && (code
== EQ
|| code
== NE
))))
5779 enum rtx_code rcode
;
5780 if (FLOAT_MODE_P (mode
))
5781 rcode
= reverse_condition_maybe_unordered (code
);
5783 rcode
= reverse_condition (code
);
5785 /* Canonicalize to UNORDERED for the libcall. */
5786 if (can_compare_p (rcode
, mode
, ccp_jump
)
5787 || (code
== ORDERED
&& ! can_compare_p (ORDERED
, mode
, ccp_jump
)))
5790 trueval
= const0_rtx
;
5795 emit_move_insn (target
, trueval
);
5796 label
= gen_label_rtx ();
5797 do_compare_rtx_and_jump (op0
, op1
, code
, unsignedp
, mode
, NULL_RTX
,
5798 NULL_RTX
, label
, -1);
5800 emit_move_insn (target
, falseval
);
5806 /* Perform possibly multi-word comparison and conditional jump to LABEL
5807 if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE. This is
5808 now a thin wrapper around do_compare_rtx_and_jump. */
5811 do_cmp_and_jump (rtx arg1
, rtx arg2
, enum rtx_code op
, enum machine_mode mode
,
5814 int unsignedp
= (op
== LTU
|| op
== LEU
|| op
== GTU
|| op
== GEU
);
5815 do_compare_rtx_and_jump (arg1
, arg2
, op
, unsignedp
, mode
,
5816 NULL_RTX
, NULL_RTX
, label
, -1);