/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
26 #include "coretypes.h"
33 #include "insn-config.h"
38 #include "langhooks.h"
static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
				   unsigned HOST_WIDE_INT,
				   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
				   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
				    unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
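/* Worked example (an annotation added for this edit, not in the original
   source): the (x & (x - 1)) idiom clears the lowest set bit, so the
   result is zero exactly when at most one bit is set:

     EXACT_POWER_OF_2_OR_ZERO_P (8)  -> (1000b & 0111b) == 0 -> true
     EXACT_POWER_OF_2_OR_ZERO_P (12) -> (1100b & 1011b) != 0 -> false
     EXACT_POWER_OF_2_OR_ZERO_P (0)  -> (0 & -1) == 0        -> true  */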
/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static bool sdiv_pow2_cheap[2][NUM_MACHINE_MODES];
static bool smod_pow2_cheap[2][NUM_MACHINE_MODES];
#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif
/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv	0
#define CODE_FOR_insv	CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv	0
#define CODE_FOR_extv	CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv	0
#define CODE_FOR_extzv	CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif
/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int zero_cost[2];
static int add_cost[2][NUM_MACHINE_MODES];
static int neg_cost[2][NUM_MACHINE_MODES];
static int shift_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftadd_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftsub0_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftsub1_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int mul_cost[2][NUM_MACHINE_MODES];
static int sdiv_cost[2][NUM_MACHINE_MODES];
static int udiv_cost[2][NUM_MACHINE_MODES];
static int mul_widen_cost[2][NUM_MACHINE_MODES];
static int mul_highpart_cost[2][NUM_MACHINE_MODES];
void
init_expmed (void)
{
  struct
  {
    struct rtx_def reg;		rtunion reg_fld[2];
    struct rtx_def plus;	rtunion plus_fld1;
    struct rtx_def neg;
    struct rtx_def mult;	rtunion mult_fld1;
    struct rtx_def sdiv;	rtunion sdiv_fld1;
    struct rtx_def udiv;	rtunion udiv_fld1;
    struct rtx_def zext;
    struct rtx_def sdiv_32;	rtunion sdiv_32_fld1;
    struct rtx_def smod_32;	rtunion smod_32_fld1;
    struct rtx_def wide_mult;	rtunion wide_mult_fld1;
    struct rtx_def wide_lshr;	rtunion wide_lshr_fld1;
    struct rtx_def wide_trunc;
    struct rtx_def shift;	rtunion shift_fld1;
    struct rtx_def shift_mult;	rtunion shift_mult_fld1;
    struct rtx_def shift_add;	rtunion shift_add_fld1;
    struct rtx_def shift_sub0;	rtunion shift_sub0_fld1;
    struct rtx_def shift_sub1;	rtunion shift_sub1_fld1;
  } all;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
  int m, n;
  enum machine_mode mode, wider_mode;
  int speed;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      cint[m] = GEN_INT (m);
    }
  memset (&all, 0, sizeof all);

  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.sdiv, DIV);
  XEXP (&all.sdiv, 0) = &all.reg;
  XEXP (&all.sdiv, 1) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.sdiv_32, DIV);
  XEXP (&all.sdiv_32, 0) = &all.reg;
  XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);

  PUT_CODE (&all.smod_32, MOD);
  XEXP (&all.smod_32, 0) = &all.reg;
  XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub0, MINUS);
  XEXP (&all.shift_sub0, 0) = &all.shift_mult;
  XEXP (&all.shift_sub0, 1) = &all.reg;

  PUT_CODE (&all.shift_sub1, MINUS);
  XEXP (&all.shift_sub1, 0) = &all.reg;
  XEXP (&all.shift_sub1, 1) = &all.shift_mult;
  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      zero_cost[speed] = rtx_cost (const0_rtx, SET, speed);

      for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
	   mode != VOIDmode;
	   mode = GET_MODE_WIDER_MODE (mode))
	{
	  PUT_MODE (&all.reg, mode);
	  PUT_MODE (&all.plus, mode);
	  PUT_MODE (&all.neg, mode);
	  PUT_MODE (&all.mult, mode);
	  PUT_MODE (&all.sdiv, mode);
	  PUT_MODE (&all.udiv, mode);
	  PUT_MODE (&all.sdiv_32, mode);
	  PUT_MODE (&all.smod_32, mode);
	  PUT_MODE (&all.wide_trunc, mode);
	  PUT_MODE (&all.shift, mode);
	  PUT_MODE (&all.shift_mult, mode);
	  PUT_MODE (&all.shift_add, mode);
	  PUT_MODE (&all.shift_sub0, mode);
	  PUT_MODE (&all.shift_sub1, mode);

	  add_cost[speed][mode] = rtx_cost (&all.plus, SET, speed);
	  neg_cost[speed][mode] = rtx_cost (&all.neg, SET, speed);
	  mul_cost[speed][mode] = rtx_cost (&all.mult, SET, speed);
	  sdiv_cost[speed][mode] = rtx_cost (&all.sdiv, SET, speed);
	  udiv_cost[speed][mode] = rtx_cost (&all.udiv, SET, speed);

	  sdiv_pow2_cheap[speed][mode] = (rtx_cost (&all.sdiv_32, SET, speed)
					  <= 2 * add_cost[speed][mode]);
	  smod_pow2_cheap[speed][mode] = (rtx_cost (&all.smod_32, SET, speed)
					  <= 4 * add_cost[speed][mode]);

	  wider_mode = GET_MODE_WIDER_MODE (mode);
	  if (wider_mode != VOIDmode)
	    {
	      PUT_MODE (&all.zext, wider_mode);
	      PUT_MODE (&all.wide_mult, wider_mode);
	      PUT_MODE (&all.wide_lshr, wider_mode);
	      XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));

	      mul_widen_cost[speed][wider_mode]
		= rtx_cost (&all.wide_mult, SET, speed);
	      mul_highpart_cost[speed][mode]
		= rtx_cost (&all.wide_trunc, SET, speed);
	    }

	  shift_cost[speed][mode][0] = 0;
	  shiftadd_cost[speed][mode][0] = shiftsub0_cost[speed][mode][0]
	    = shiftsub1_cost[speed][mode][0] = add_cost[speed][mode];

	  n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
	  for (m = 1; m < n; m++)
	    {
	      XEXP (&all.shift, 1) = cint[m];
	      XEXP (&all.shift_mult, 1) = pow2[m];

	      shift_cost[speed][mode][m] = rtx_cost (&all.shift, SET, speed);
	      shiftadd_cost[speed][mode][m] = rtx_cost (&all.shift_add, SET,
							speed);
	      shiftsub0_cost[speed][mode][m] = rtx_cost (&all.shift_sub0, SET,
							 speed);
	      shiftsub1_cost[speed][mode][m] = rtx_cost (&all.shift_sub1, SET,
							 speed);
	    }
	}
    }

  default_rtl_profile ();
}
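/* Usage sketch (an annotation added for this edit, not in the original
   source): once init_expmed has run, shift_cost[speed][mode][m] caches the
   rtx_cost of (ashift:MODE reg (const_int m)), and callers such as the
   multiplication synthesis code compare entries like
   shiftadd_cost[speed][mode][m] against mul_cost[speed][mode] to decide
   whether a multiply is cheaper as a shift-and-add sequence.  */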
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
	{
	  data = &insn_data[CODE_FOR_insv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
	{
	  data = &insn_data[CODE_FOR_extv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
	{
	  data = &insn_data[CODE_FOR_extzv];
	  break;
	}
      return MAX_MACHINE_MODE;

    default:
      gcc_unreachable ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
/* Return true if X, of mode MODE, matches the predicate for operand
   OPNO of instruction ICODE.  Allow volatile memories, regardless of
   the ambient volatile_ok setting.  */

static bool
check_predicate_volatile_ok (enum insn_code icode, int opno,
			     rtx x, enum machine_mode mode)
{
  bool save_volatile_ok, result;

  save_volatile_ok = volatile_ok;
  volatile_ok = true;
  result = insn_data[(int) icode].operand[opno].predicate (x, mode);
  volatile_ok = save_volatile_ok;

  return result;
}
/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		   unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
		   rtx value, bool fallback_p)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  rtx orig_value = value;
  int byte_offset;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
      int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));

      byte_offset = 0;

      /* Paradoxical subregs need special handling on big endian machines.  */
      if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
	{
	  int difference = inner_mode_size - outer_mode_size;

	  if (WORDS_BIG_ENDIAN)
	    byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    byte_offset += difference % UNITS_PER_WORD;
	}
      else
	byte_offset = SUBREG_BYTE (op0);

      bitnum += byte_offset * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return true;
  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (optab_handler (vec_set_optab, GET_MODE (op0))->insn_code
	  != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) optab_handler (vec_set_optab, outermode)->insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
	src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
	rtxpos = copy_to_mode_reg (mode1, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
		  && (*insn_data[icode].operand[1].predicate) (src, mode1)
		  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
	{
	  emit_insn (seq);
	  emit_insn (pat);
	  return true;
	}
    }
  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  offset = bitnum / unit;
  bitpos = bitnum % unit;
  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
		+ (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (!MEM_P (op0)
	  ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
	      || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
	     && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
	  : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
	     || (offset * BITS_PER_UNIT % bitsize == 0
		 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (MEM_P (op0))
	op0 = adjust_address (op0, fieldmode, offset);
      else if (GET_MODE (op0) != fieldmode)
	op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
				   byte_offset);
      emit_move_insn (op0, value);
      return true;
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (MEM_P (op0))
	  op0 = adjust_address (op0, imode, 0);
	else
	  {
	    gcc_assert (imode != BLKmode);
	    op0 = gen_lowpart (imode, op0);
	  }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }
  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (!MEM_P (op0)
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (optab_handler (movstrict_optab, fieldmode)->insn_code
	  != CODE_FOR_nothing))
    {
      int icode = optab_handler (movstrict_optab, fieldmode)->insn_code;
      rtx insn;
      rtx start = get_last_insn ();
      rtx arg0 = op0;

      /* Get appropriate low part of the value being stored.  */
      if (CONST_INT_P (value) || REG_P (value))
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
	value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
	{
	  /* Else we've got some float mode source being extracted into
	     a different float mode destination -- this combination of
	     subregs results in Severe Tire Damage.  */
	  gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
		      || GET_MODE_CLASS (fieldmode) == MODE_INT
		      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
	  arg0 = SUBREG_REG (op0);
	}

      insn = (GEN_FCN (icode)
	      (gen_rtx_SUBREG (fieldmode, arg0,
			       (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
			       + (offset * UNITS_PER_WORD)),
	       value));
      if (insn)
	{
	  emit_insn (insn);
	  return true;
	}
      delete_insns_since (start);
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force
	 will result in an abort.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
	fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  unsigned int wordnum = (backwards ? nwords - i - 1 : i);
	  unsigned int bit_offset = (backwards
				     ? MAX ((int) bitsize - ((int) i + 1)
					    * BITS_PER_WORD,
					    0)
				     : (int) i * BITS_PER_WORD);
	  rtx value_word = operand_subword_force (value, wordnum, fieldmode);

	  if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
					    bitsize - i * BITS_PER_WORD),
				  bitnum + bit_offset, word_mode,
				  value_word, fallback_p))
	    {
	      delete_insns_since (last);
	      return false;
	    }
	}
      return true;
    }
  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (!MEM_P (op0))
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (!REG_P (op0))
	    {
	      /* Since this is a destination (lvalue), we can't copy
		 it to a pseudo.  We can remove a SUBREG that does not
		 change the size of the operand.  Such a SUBREG may
		 have been added above.  */
	      gcc_assert (GET_CODE (op0) == SUBREG
			  && (GET_MODE_SIZE (GET_MODE (op0))
			      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
	      op0 = SUBREG_REG (op0);
	    }
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */

  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }
  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && bitsize > 0
      && GET_MODE_BITSIZE (op_mode) >= bitsize
      && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
      && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
							VOIDmode)
      && check_predicate_volatile_ok (CODE_FOR_insv, 0, op0, VOIDmode))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      bool copy_back = false;

      /* Add OFFSET into OP0's address.  */
      if (MEM_P (xop0))
	xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in OP_MODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
	xop0 = gen_lowpart_SUBREG (op_mode, xop0);

      /* If the destination is a paradoxical subreg such that we need a
	 truncate to the inner mode, perform the insertion on a temporary and
	 truncate the result to the original destination.  Note that we can't
	 just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
	 X) 0)) is (reg:N X).  */
      if (GET_CODE (xop0) == SUBREG
	  && REG_P (SUBREG_REG (xop0))
	  && (!TRULY_NOOP_TRUNCATION
	      (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (xop0))),
	       GET_MODE_BITSIZE (op_mode))))
	{
	  rtx tem = gen_reg_rtx (op_mode);
	  emit_move_insn (tem, xop0);
	  xop0 = tem;
	  copy_back = true;
	}

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
	xbitpos += GET_MODE_BITSIZE (op_mode) - unit;

      unit = GET_MODE_BITSIZE (op_mode);

      /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != op_mode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
		{
		  rtx tmp;

		  tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
		  if (! tmp)
		    tmp = simplify_gen_subreg (op_mode,
					       force_reg (GET_MODE (value),
							  value1),
					       GET_MODE (value), 0);
		  value1 = tmp;
		}
	      else
		value1 = gen_lowpart (op_mode, value1);
	    }
	  else if (CONST_INT_P (value))
	    value1 = gen_int_mode (INTVAL (value), op_mode);
	  else
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    gcc_assert (CONSTANT_P (value));
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
	     (value1, op_mode)))
	value1 = force_reg (op_mode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	{
	  emit_insn (pat);

	  if (copy_back)
	    convert_move (op0, xop0, true);
	  return true;
	}
      delete_insns_since (last);
    }
  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (HAVE_insv && MEM_P (op0))
    {
      enum machine_mode bestmode;

      /* Get the mode to use for inserting into this field.  If OP0 is
	 BLKmode, get the smallest mode consistent with the alignment.  If
	 OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
	 mode.  Otherwise, use the smallest mode containing the field.  */

      if (GET_MODE (op0) == BLKmode
	  || (op_mode != MAX_MACHINE_MODE
	      && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
	bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
				  (op_mode == MAX_MACHINE_MODE
				   ? VOIDmode : op_mode),
				  MEM_VOLATILE_P (op0));
      else
	bestmode = GET_MODE (op0);

      if (bestmode != VOIDmode
	  && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
	  && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
	       && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
	{
	  rtx last, tempreg, xop0;
	  unsigned HOST_WIDE_INT xoffset, xbitpos;

	  last = get_last_insn ();

	  /* Adjust address to point to the containing unit of
	     that mode.  Compute the offset as a multiple of this unit,
	     counting in bytes.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  xbitpos = bitnum % unit;
	  xop0 = adjust_address (op0, bestmode, xoffset);

	  /* Fetch that unit, store the bitfield in it, then store
	     the unit.  */
	  tempreg = copy_to_reg (xop0);
	  if (store_bit_field_1 (tempreg, bitsize, xbitpos,
				 fieldmode, orig_value, false))
	    {
	      emit_move_insn (xop0, tempreg);
	      return true;
	    }
	  delete_insns_since (last);
	}
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return true;
}
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.  */

void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
		 rtx value)
{
  if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true))
    gcc_unreachable ();
}
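/* Usage sketch (an annotation added for this edit, not in the original
   source): a caller storing an 8-bit value V into bits 8..15 of a SImode
   pseudo REG would write

       store_bit_field (reg, 8, 8, QImode, v);

   store_bit_field_1 then tries, in order, a plain SUBREG move, a vec_set
   or movstrict pattern, the insv pattern, and finally the shift-and-mask
   code in store_fixed_bit_field.  */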
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
		       unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    {
      gcc_assert (!offset);

      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos, value);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.
	 We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
	mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
				 value);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */
  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (CONST_INT_P (value))
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	value = convert_to_mode (mode, value, 1);

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
    }
  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, temp,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    {
      op0 = copy_rtx (op0);
      emit_move_insn (op0, temp);
    }
}
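/* Worked example (an annotation added for this edit, not in the original
   source): on a little-endian target the sequence above computes

       W = (W & ~MASK) | ((VALUE << BITPOS) & MASK)

   where MASK has BITSIZE one bits starting at BITPOS; e.g. BITSIZE = 8,
   BITPOS = 4 in a 32-bit word gives MASK = 0x00000ff0 (see mask_rtx
   below).  */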
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && !CONST_INT_P (value))
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (!MEM_P (value) || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (CONST_INT_P (value))
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.  */
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    total_bits - bitsize + bitsdone,
					    NULL_RTX, 1);
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (CONST_INT_P (value))
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    bitsdone, NULL_RTX, 1);
	}

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (REG_P (op0))
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
	 store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
			     thispos, part);
      bitsdone += thissize;
    }
}
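/* Worked example (an annotation added for this edit, not in the original
   source): with 32-bit words (UNIT == 32), storing a 10-bit field at
   BITPOS 28 takes two iterations of the loop above:

     pass 1: offset 0, thispos 28, thissize 4   (bits 28..31 of word 0)
     pass 2: offset 1, thispos 0,  thissize 6   (bits 0..5 of word 1)

   so each store_fixed_bit_field call stays within a single word.  */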
/* A subroutine of extract_bit_field_1 that converts return value X
   to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
   to extract_bit_field.  */

static rtx
convert_extracted_bit_field (rtx x, enum machine_mode mode,
			     enum machine_mode tmode, bool unsignedp)
{
  if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
    return x;

  /* If the x mode is not a scalar integral, first convert to the
     integer mode of that size and then access it as a floating-point
     value via a SUBREG.  */
  if (!SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode smode;

      smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
      x = convert_to_mode (smode, x, unsignedp);
      x = force_reg (smode, x);
      return gen_lowpart (tmode, x);
    }

  return convert_to_mode (tmode, x, unsignedp);
}
/* A subroutine of extract_bit_field, with the same arguments.
   If FALLBACK_P is true, fall back to extract_fixed_bit_field
   if we can find no other means of implementing the operation.
   If FALLBACK_P is false, return NULL instead.  */

static rtx
extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		     unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
		     enum machine_mode mode, enum machine_mode tmode,
		     bool fallback_p)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  enum machine_mode int_mode;
  enum machine_mode ext_mode;
  enum machine_mode mode1;
  enum insn_code icode;
  int byte_offset;

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }
  /* See if we can get a better vector mode before extracting.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && GET_MODE_INNER (GET_MODE (op0)) != tmode)
    {
      enum machine_mode new_mode;
      int nunits = GET_MODE_NUNITS (GET_MODE (op0));

      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
	new_mode = MIN_MODE_VECTOR_FLOAT;
      else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
	new_mode = MIN_MODE_VECTOR_FRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
	new_mode = MIN_MODE_VECTOR_UFRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
	new_mode = MIN_MODE_VECTOR_ACCUM;
      else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
	new_mode = MIN_MODE_VECTOR_UACCUM;
      else
	new_mode = MIN_MODE_VECTOR_INT;

      for (; new_mode != VOIDmode; new_mode = GET_MODE_WIDER_MODE (new_mode))
	if (GET_MODE_NUNITS (new_mode) == nunits
	    && GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
	    && targetm.vector_mode_supported_p (new_mode))
	  break;
      if (new_mode != VOIDmode)
	op0 = gen_lowpart (new_mode, op0);
    }
  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (optab_handler (vec_extract_optab, GET_MODE (op0))->insn_code
	  != CODE_FOR_nothing)
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
	  == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) optab_handler (vec_extract_optab, outermode)->insn_code;
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
	dest = target;

      if (dest == NULL)
	dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
	dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
	src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
	rtxpos = copy_to_mode_reg (mode1, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
		  && (*insn_data[icode].operand[1].predicate) (src, mode1)
		  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));

      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
	{
	  emit_insn (seq);
	  emit_insn (pat);
	  return gen_lowpart (tmode, dest);
	}
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (MEM_P (op0))
	  op0 = adjust_address (op0, imode, 0);
	else if (imode != BLKmode)
	  {
	    op0 = gen_lowpart (imode, op0);

	    /* If we got a SUBREG, force it into a register since we
	       aren't going to be able to do another SUBREG on it.  */
	    if (GET_CODE (op0) == SUBREG)
	      op0 = force_reg (imode, op0);
	  }
	else if (REG_P (op0))
	  {
	    rtx reg, subreg;
	    imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
					    MODE_INT);
	    reg = gen_reg_rtx (imode);
	    subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
	    emit_move_insn (subreg, op0);
	    op0 = reg;
	    bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
	  }
	else
	  {
	    rtx mem = assign_stack_temp (GET_MODE (op0),
					 GET_MODE_SIZE (GET_MODE (op0)), 0);
	    emit_move_insn (mem, op0);
	    op0 = adjust_address (mem, BLKmode, 0);
	  }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }
  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  bitpos = bitnum % unit;
  offset = bitnum / unit;
  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
	   ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
	   : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	&& bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
	   /* ??? The big endian test here is wrong.  This is correct
	      if the value is in a register, and if mode_for_size is not
	      the same mode as op0.  This causes us to get unnecessarily
	      inefficient code from the Thumb port when -mbig-endian.  */
	   && (BYTES_BIG_ENDIAN
	       ? bitpos + bitsize == BITS_PER_WORD
	       : bitpos == 0)))
      && ((!MEM_P (op0)
	   && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode1),
				     GET_MODE_BITSIZE (GET_MODE (op0)))
	   && GET_MODE_SIZE (mode1) != 0
	   && byte_offset % GET_MODE_SIZE (mode1) == 0)
	  || (MEM_P (op0)
	      && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
		  || (offset * BITS_PER_UNIT % bitsize == 0
		      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (MEM_P (op0))
	op0 = adjust_address (op0, mode1, offset);
      else if (mode1 != GET_MODE (op0))
	{
	  rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
					 byte_offset);
	  if (sub == NULL)
	    goto no_subreg_mode_swap;
	  op0 = sub;
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || !REG_P (target))
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_clobber (target);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  unsigned int wordnum
	    = (WORDS_BIG_ENDIAN
	       ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
	       : i);
	  /* Offset from start of field in OP0.  */
	  unsigned int bit_offset = (WORDS_BIG_ENDIAN
				     ? MAX (0, ((int) bitsize - ((int) i + 1)
						* (int) BITS_PER_WORD))
				     : (int) i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset, 1, target_part, mode,
				 word_mode);

	  gcc_assert (target_part);

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      unsigned int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		emit_move_insn
		  (operand_subword (target,
				    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
				    1, VOIDmode),
		   const0_rtx);
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_cst (NULL_TREE,
					    GET_MODE_BITSIZE (mode) - bitsize),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_cst (NULL_TREE,
					  GET_MODE_BITSIZE (mode) - bitsize),
			   NULL_RTX, 0);
    }
  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  /* Should probably push op0 out to memory and then do a load.  */
  gcc_assert (int_mode != BLKmode);

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */
  if (!MEM_P (op0))
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (!REG_P (op0))
	    op0 = copy_to_reg (op0);
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }
  /* Now OFFSET is nonzero only for memory operands.  */
  ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
  icode = unsignedp ? CODE_FOR_extzv : CODE_FOR_extv;
  if (ext_mode != MAX_MACHINE_MODE
      && bitsize > 0
      && GET_MODE_BITSIZE (ext_mode) >= bitsize
      /* If op0 is a register, we need it in EXT_MODE to make it
	 acceptable to the format of ext(z)v.  */
      && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
      && !((REG_P (op0) || GET_CODE (op0) == SUBREG)
	   && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode)))
      && check_predicate_volatile_ok (icode, 1, op0, GET_MODE (op0)))
    {
      unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
      rtx bitsize_rtx, bitpos_rtx;
      rtx last = get_last_insn ();
      rtx xop0 = op0;
      rtx xtarget = target;
      rtx xspec_target = target;
      rtx xspec_target_subreg = 0;
      rtx pat;

      /* If op0 is a register, we need it in EXT_MODE to make it
	 acceptable to the format of ext(z)v.  */
      if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
	xop0 = gen_lowpart_SUBREG (ext_mode, xop0);
      if (MEM_P (xop0))
	/* Get ref to first byte containing part of the field.  */
	xop0 = adjust_address (xop0, byte_mode, xoffset);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */
      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* Now convert from counting within UNIT to counting in EXT_MODE.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
	xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;

      unit = GET_MODE_BITSIZE (ext_mode);

      if (xtarget == 0)
	xtarget = xspec_target = gen_reg_rtx (tmode);

      if (GET_MODE (xtarget) != ext_mode)
	{
	  /* Don't use LHS paradoxical subreg if explicit truncation is needed
	     between the mode of the extraction (word_mode) and the target
	     mode.  Instead, create a temporary and use convert_move to set
	     the target.  */
	  if (REG_P (xtarget)
	      && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (xtarget)),
					GET_MODE_BITSIZE (ext_mode)))
	    {
	      xtarget = gen_lowpart (ext_mode, xtarget);
	      if (GET_MODE_SIZE (ext_mode)
		  > GET_MODE_SIZE (GET_MODE (xspec_target)))
		xspec_target_subreg = xtarget;
	    }
	  else
	    xtarget = gen_reg_rtx (ext_mode);
	}

      /* If this machine's ext(z)v insists on a register target,
	 make sure we have one.  */
      if (!insn_data[(int) icode].operand[0].predicate (xtarget, ext_mode))
	xtarget = gen_reg_rtx (ext_mode);

      bitsize_rtx = GEN_INT (bitsize);
      bitpos_rtx = GEN_INT (xbitpos);

      pat = (unsignedp
	     ? gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx)
	     : gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx));
      if (pat)
	{
	  emit_insn (pat);
	  if (xtarget == xspec_target)
	    return xtarget;
	  if (xtarget == xspec_target_subreg)
	    return xspec_target;
	  return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
	}
      delete_insns_since (last);
    }
  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
    {
      enum machine_mode bestmode;

      /* Get the mode to use for inserting into this field.  If
	 OP0 is BLKmode, get the smallest mode consistent with the
	 alignment.  If OP0 is a non-BLKmode object that is no
	 wider than EXT_MODE, use its mode.  Otherwise, use the
	 smallest mode containing the field.  */

      if (GET_MODE (op0) == BLKmode
	  || (ext_mode != MAX_MACHINE_MODE
	      && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
	bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
				  (ext_mode == MAX_MACHINE_MODE
				   ? VOIDmode : ext_mode),
				  MEM_VOLATILE_P (op0));
      else
	bestmode = GET_MODE (op0);

      if (bestmode != VOIDmode
	  && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
	       && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
	{
	  unsigned HOST_WIDE_INT xoffset, xbitpos;

	  /* Compute the offset as a multiple of this unit,
	     counting in bytes.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  xbitpos = bitnum % unit;

	  /* Make sure the register is big enough for the whole field.  */
	  if (xoffset * BITS_PER_UNIT + unit
	      >= offset * BITS_PER_UNIT + bitsize)
	    {
	      rtx last, result, xop0;

	      last = get_last_insn ();

	      /* Fetch it to a register in that size.  */
	      xop0 = adjust_address (op0, bestmode, xoffset);
	      xop0 = force_reg (bestmode, xop0);
	      result = extract_bit_field_1 (xop0, bitsize, xbitpos,
					    unsignedp, target,
					    mode, tmode, false);
	      if (result)
		return result;

	      delete_insns_since (last);
	    }
	}
    }

  if (!fallback_p)
    return NULL;

  target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
				    bitpos, target, unsignedp);
  return convert_extracted_bit_field (target, mode, tmode, unsignedp);
}
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
		   enum machine_mode mode, enum machine_mode tmode)
{
  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
			      target, mode, tmode, true);
}
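/* Usage sketch (an annotation added for this edit, not in the original
   source): the mirror image of the store_bit_field example -- fetching an
   unsigned 8-bit field starting at bit 8 of a SImode pseudo REG:

       rtx v = extract_bit_field (reg, 8, 8, 1, NULL_RTX, QImode, QImode);

   With no usable extzv pattern this reduces to the shift-and-mask code in
   extract_fixed_bit_field below.  */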
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
			 unsigned HOST_WIDE_INT offset,
			 unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitpos, rtx target,
			 int unsignedp)
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || REG_P (op0))
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */
  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_cst (NULL_TREE, bitpos);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }
1801 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1802 then arithmetic-shift its lsb to the lsb of the word. */
1803 op0
= force_reg (mode
, op0
);
1807 /* Find the narrowest integer mode that contains the field. */
1809 for (mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
); mode
!= VOIDmode
;
1810 mode
= GET_MODE_WIDER_MODE (mode
))
1811 if (GET_MODE_BITSIZE (mode
) >= bitsize
+ bitpos
)
1813 op0
= convert_to_mode (mode
, op0
, 0);
1817 if (GET_MODE_BITSIZE (mode
) != (bitsize
+ bitpos
))
1820 = build_int_cst (NULL_TREE
,
1821 GET_MODE_BITSIZE (mode
) - (bitsize
+ bitpos
));
1822 /* Maybe propagate the target for the shift. */
1823 rtx subtarget
= (target
!= 0 && REG_P (target
) ? target
: 0);
1824 op0
= expand_shift (LSHIFT_EXPR
, mode
, op0
, amount
, subtarget
, 1);
1827 return expand_shift (RSHIFT_EXPR
, mode
, op0
,
1828 build_int_cst (NULL_TREE
,
1829 GET_MODE_BITSIZE (mode
) - bitsize
),
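
/* Illustrative example (editor's addition, not from the original
   sources): on a 32-bit little-endian target the two paths above
   reduce to the familiar C idioms

     unsigned field_u = (word >> bitpos) & ((1u << bitsize) - 1);
     int field_s = ((int) (word << (32 - bitpos - bitsize))
		    >> (32 - bitsize));

   i.e. shift-and-mask for the unsigned case, and a left shift followed
   by an arithmetic right shift for the signed case (assuming the
   target's signed right shift is arithmetic).  */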
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitsize == 0)
    masklow = 0;
  else if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
		>> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitsize == 0)
    maskhigh = 0;
  else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
		 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
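
/* Worked example (editor's addition): with a 32-bit MODE,
   mask_rtx (SImode, 3, 5, 0) yields 0x000000f8 -- five ones shifted
   left by three -- and mask_rtx (SImode, 3, 5, 1) its complement
   0xffffff07.  Masks that cross the HOST_WIDE_INT boundary spill into
   MASKHIGH, which is why the low and high halves are computed
   separately above.  */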
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.  */

static rtx
extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitpos, int unsignedp)
{
  unsigned int unit;
  unsigned int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 extract_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (REG_P (op0))
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* Extract the parts in bit-counting order,
	 whose meaning is determined by BYTES_PER_UNIT.
	 OFFSET is in UNITs, and UNIT is in bits.
	 extract_fixed_bit_field wants offset in bytes.  */
      part = extract_fixed_bit_field (word_mode, word,
				      offset * unit / BITS_PER_UNIT,
				      thissize, thispos, 0, 1);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
	{
	  if (bitsize != bitsdone)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_cst (NULL_TREE, bitsize - bitsdone),
				 0, 1);
	}
      else
	{
	  if (bitsdone != thissize)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_cst (NULL_TREE,
						bitsdone - thissize), 0, 1);
	}

      if (first)
	result = part;
      else
	/* Combine the parts with bitwise or.  This works
	   because we extracted each part as an unsigned bit field.  */
	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
			       OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
			 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
			 NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
		       build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
		       NULL_RTX, 0);
}
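
/* Illustrative example (editor's addition, not from the original
   sources): with 32-bit words, a 16-bit field starting at bit 24 is
   assembled from two fixed extractions that are OR'ed together, e.g.
   on a little-endian target

     part0 = (w0 >> 24) & 0xff;		(8 bits from the first word)
     part1 = w1 & 0xff;			(8 bits from the second word)
     field = part0 | (part1 << 8);

   and the final pair of shifts above sign-extends the result when
   UNSIGNEDP is zero.  */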
/* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
   the bit pattern.  SRC_MODE is the mode of SRC; if this is smaller than
   MODE, fill the upper bits with zeros.  Fail if the layout of either
   mode is unknown (as for CC modes) or if the extraction would involve
   unprofitable mode punning.  Return the value on success, otherwise
   return null.

   This is different from gen_lowpart* in these respects:

     - the returned value must always be considered an rvalue

     - when MODE is wider than SRC_MODE, the extraction involves
       a zero extension

     - when MODE is smaller than SRC_MODE, the extraction involves
       a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).

   In other words, this routine performs a computation, whereas the
   gen_lowpart* routines are conceptually lvalue or rvalue subreg
   operations.  */

rtx
extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
{
  enum machine_mode int_mode, src_int_mode;

  if (mode == src_mode)
    return src;

  if (CONSTANT_P (src))
    {
      /* simplify_gen_subreg can't be used here, as if simplify_subreg
	 fails, it will happily create (subreg (symbol_ref)) or similar
	 invalid SUBREGs.  */
      unsigned int byte = subreg_lowpart_offset (mode, src_mode);
      rtx ret = simplify_subreg (mode, src, src_mode, byte);
      if (ret)
	return ret;

      if (GET_MODE (src) == VOIDmode
	  || !validate_subreg (mode, src_mode, src, byte))
	return NULL_RTX;

      src = force_reg (GET_MODE (src), src);
      return gen_rtx_SUBREG (mode, src, byte);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
    return NULL_RTX;

  if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
      && MODES_TIEABLE_P (mode, src_mode))
    {
      rtx x = gen_lowpart_common (mode, src);
      if (x)
	return x;
    }

  src_int_mode = int_mode_for_mode (src_mode);
  int_mode = int_mode_for_mode (mode);
  if (src_int_mode == BLKmode || int_mode == BLKmode)
    return NULL_RTX;

  if (!MODES_TIEABLE_P (src_int_mode, src_mode))
    return NULL_RTX;
  if (!MODES_TIEABLE_P (int_mode, mode))
    return NULL_RTX;

  src = gen_lowpart (src_int_mode, src);
  src = convert_modes (int_mode, src_int_mode, src, true);
  src = gen_lowpart (mode, src);
  return src;
}
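
/* Illustrative example (editor's addition, an analogy only): the closest
   source-level counterpart of this routine is a bit-pattern
   reinterpretation such as

     float f = 1.0f;
     unsigned int bits;
     memcpy (&bits, &f, sizeof bits);

   a computation on the bits of F rather than an lvalue view of F, which
   is exactly the distinction drawn against gen_lowpart above.  */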
/* Add INC into TARGET.  */

void
expand_inc (rtx target, rtx inc)
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
			    target, inc,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (rtx target, rtx dec)
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
			    target, dec,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
	      tree amount, rtx target, int unsignedp)
{
  rtx op1, temp = 0;
  int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  optab lshift_optab = ashl_optab;
  optab rshift_arith_optab = ashr_optab;
  optab rshift_uns_optab = lshr_optab;
  optab lrotate_optab = rotl_optab;
  optab rrotate_optab = rotr_optab;
  enum machine_mode op1_mode;
  int attempt;
  bool speed = optimize_insn_for_speed_p ();

  op1 = expand_normal (amount);
  op1_mode = GET_MODE (op1);

  /* Determine whether the shift/rotate amount is a vector, or scalar.  If the
     shift amount is a vector, use the vector/vector shift patterns.  */
  if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
    {
      lshift_optab = vashl_optab;
      rshift_arith_optab = vashr_optab;
      rshift_uns_optab = vlshr_optab;
      lrotate_optab = vrotl_optab;
      rrotate_optab = vrotr_optab;
    }

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  if (SHIFT_COUNT_TRUNCATED)
    {
      if (CONST_INT_P (op1)
	  && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
	      (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
	op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
		       % GET_MODE_BITSIZE (mode));
      else if (GET_CODE (op1) == SUBREG
	       && subreg_lowpart_p (op1)
	       && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (op1))))
	op1 = SUBREG_REG (op1);
    }

  if (op1 == const0_rtx)
    return shifted;

  /* Check whether it's cheaper to implement a left shift by a constant
     bit count by a sequence of additions.  */
  if (code == LSHIFT_EXPR
      && CONST_INT_P (op1)
      && INTVAL (op1) > 0
      && INTVAL (op1) < GET_MODE_BITSIZE (mode)
      && INTVAL (op1) < MAX_BITS_PER_WORD
      && shift_cost[speed][mode][INTVAL (op1)] > INTVAL (op1) * add_cost[speed][mode]
      && shift_cost[speed][mode][INTVAL (op1)] != MAX_COST)
    {
      int i;
      for (i = 0; i < INTVAL (op1); i++)
	{
	  temp = force_reg (mode, shifted);
	  shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
				  unsignedp, OPTAB_LIB_WIDEN);
	}
      return shifted;
    }

  for (attempt = 0; temp == 0 && attempt < 3; attempt++)
    {
      enum optab_methods methods;

      if (attempt == 0)
	methods = OPTAB_DIRECT;
      else if (attempt == 1)
	methods = OPTAB_WIDEN;
      else
	methods = OPTAB_LIB_WIDEN;

      if (rotate)
	{
	  /* Widening does not work for rotation.  */
	  if (methods == OPTAB_WIDEN)
	    continue;
	  else if (methods == OPTAB_LIB_WIDEN)
	    {
	      /* If we have been unable to open-code this by a rotation,
		 do it as the IOR of two shifts.  I.e., to rotate A
		 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
		 where C is the bitsize of A.

		 It is theoretically possible that the target machine might
		 not be able to perform either shift and hence we would
		 be making two libcalls rather than just the one for the
		 shift (similarly if IOR could not be done).  We will allow
		 this extremely unlikely lossage to avoid complicating the
		 code.  */

	      rtx subtarget = target == shifted ? 0 : target;
	      tree new_amount, other_amount;
	      rtx temp1;
	      tree type = TREE_TYPE (amount);
	      if (GET_MODE (op1) != TYPE_MODE (type)
		  && GET_MODE (op1) != VOIDmode)
		op1 = convert_to_mode (TYPE_MODE (type), op1, 1);
	      new_amount = make_tree (type, op1);
	      other_amount
		= fold_build2 (MINUS_EXPR, type,
			       build_int_cst (type, GET_MODE_BITSIZE (mode)),
			       new_amount);

	      shifted = force_reg (mode, shifted);

	      temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
				   mode, shifted, new_amount, 0, 1);
	      temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
				    mode, shifted, other_amount, subtarget, 1);
	      return expand_binop (mode, ior_optab, temp, temp1, target,
				   unsignedp, methods);
	    }

	  temp = expand_binop (mode,
			       left ? lrotate_optab : rrotate_optab,
			       shifted, op1, target, unsignedp, methods);
	}
      else if (unsignedp)
	temp = expand_binop (mode,
			     left ? lshift_optab : rshift_uns_optab,
			     shifted, op1, target, unsignedp, methods);

      /* Do arithmetic shifts.
	 Also, if we are going to widen the operand, we can just as well
	 use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
	  && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
	{
	  enum optab_methods methods1 = methods;

	  /* If trying to widen a log shift to an arithmetic shift,
	     don't accept an arithmetic shift of the same size.  */
	  if (unsignedp)
	    methods1 = OPTAB_MUST_WIDEN;

	  /* Arithmetic shift */

	  temp = expand_binop (mode,
			       left ? lshift_optab : rshift_arith_optab,
			       shifted, op1, target, unsignedp, methods1);
	}

      /* We used to try extzv here for logical right shifts, but that was
	 only useful for one machine, the VAX, and caused poor code
	 generation there for lshrdi3, so the code was deleted and a
	 define_expand for lshrsi3 was added to vax.md.  */
    }

  gcc_assert (temp);
  return temp;
}
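
/* Illustrative example (editor's addition, not from the original
   sources): the IOR-of-two-shifts fallback for rotation above
   corresponds to the C idiom (for 32-bit values and 0 < n < 32)

     unsigned int rotl32 (unsigned int a, int n)
     {
       return (a << n) | (a >> (32 - n));
     }

   which needs no rotate instruction at all.  */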
enum alg_code {
  alg_unknown,
  alg_zero,
  alg_m, alg_shift,
  alg_add_t_m2,
  alg_sub_t_m2,
  alg_add_factor,
  alg_sub_factor,
  alg_add_t2_m,
  alg_sub_t2_m,
  alg_impossible
};

/* This structure holds the "cost" of a multiply sequence.  The
   "cost" field holds the total rtx_cost of every operator in the
   synthetic multiplication sequence, hence cost(a op b) is defined
   as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
   The "latency" field holds the minimum possible latency of the
   synthetic multiply, on a hypothetical infinitely parallel CPU.
   This is the critical path, or the maximum height, of the expression
   tree which is the sum of rtx_costs on the most expensive path from
   any leaf to the root.  Hence latency(a op b) is defined as zero for
   leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise.  */

struct mult_cost {
  short cost;     /* Total rtx_cost of the multiplication sequence.  */
  short latency;  /* The latency of the multiplication sequence.  */
};

/* This macro is used to compare a pointer to a mult_cost against a
   single integer "rtx_cost" value.  This is equivalent to the macro
   CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}.  */
#define MULT_COST_LESS(X,Y) ((X)->cost < (Y)	\
			     || ((X)->cost == (Y) && (X)->latency < (Y)))

/* This macro is used to compare two pointers to mult_costs against
   each other.  The macro returns true if X is cheaper than Y.
   Currently, the cheaper of two mult_costs is the one with the
   lower "cost".  If "cost"s are tied, the lower latency is cheaper.  */
#define CHEAPER_MULT_COST(X,Y)  ((X)->cost < (Y)->cost		\
				 || ((X)->cost == (Y)->cost	\
				     && (X)->latency < (Y)->latency))
/* This structure records a sequence of operations.
   `ops' is the number of operations recorded.
   `cost' is their total cost.
   The operations are stored in `op' and the corresponding
   logarithms of the integer coefficients in `log'.

   These are the operations:
   alg_zero		total := 0;
   alg_m		total := multiplicand;
   alg_shift		total := total * coeff
   alg_add_t_m2		total := total + multiplicand * coeff;
   alg_sub_t_m2		total := total - multiplicand * coeff;
   alg_add_factor	total := total * coeff + total;
   alg_sub_factor	total := total * coeff - total;
   alg_add_t2_m		total := total * coeff + multiplicand;
   alg_sub_t2_m		total := total * coeff - multiplicand;

   The first operand must be either alg_zero or alg_m.  */

struct algorithm
{
  struct mult_cost cost;
  short ops;
  /* The size of the OP and LOG fields are not directly related to the
     word size, but the worst-case algorithms will be if we have few
     consecutive ones or zeros, i.e., a multiplicand like 10101010101...
     In that case we will generate shift-by-2, add, shift-by-2, add,...,
     in total wordsize operations.  */
  enum alg_code op[MAX_BITS_PER_WORD];
  char log[MAX_BITS_PER_WORD];
};
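
/* Worked example (editor's addition): multiplying by 10 is recorded as
   alg_m (total := x), then alg_add_t2_m with log 2 (total := total*4 + x,
   i.e. 5*x), then alg_shift with log 1 (total := total*2):

     x * 10 == ((x << 2) + x) << 1

   one shift-add plus one shift in place of a multiply instruction.  */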
/* The entry for our multiplication cache/hash table.  */
struct alg_hash_entry {
  /* The number we are multiplying by.  */
  unsigned HOST_WIDE_INT t;

  /* The mode in which we are multiplying something by T.  */
  enum machine_mode mode;

  /* The best multiplication algorithm for t.  */
  enum alg_code alg;

  /* The cost of multiplication if ALG_CODE is not alg_impossible.
     Otherwise, the cost within which multiplication by T is
     impossible.  */
  struct mult_cost cost;

  /* Optimized for speed?  */
  bool speed;
};

/* The number of cache/hash entries.  */
#if HOST_BITS_PER_WIDE_INT == 64
#define NUM_ALG_HASH_ENTRIES 1031
#else
#define NUM_ALG_HASH_ENTRIES 307
#endif

/* Each entry of ALG_HASH caches alg_code for some integer.  This is
   actually a hash table.  If we have a collision, the older entry is
   kicked out.  */
static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];

/* Indicates the type of fixup needed after a constant multiplication.
   BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
   the result should be negated, and ADD_VARIANT means that the
   multiplicand should be added to the result.  */
enum mult_variant {basic_variant, negate_variant, add_variant};

static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
			const struct mult_cost *, enum machine_mode mode);
static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
				 struct algorithm *, enum mult_variant *, int);
static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
			      const struct algorithm *, enum mult_variant);
static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
						 int, rtx *, int *, int *);
static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
static rtx extract_high_half (enum machine_mode, rtx);
static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
				       int, int);
/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.
   MODE is the machine mode of the multiplication.  */

static void
synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
	    const struct mult_cost *cost_limit, enum machine_mode mode)
{
  int m;
  struct algorithm *alg_in, *best_alg;
  struct mult_cost best_cost;
  struct mult_cost new_limit;
  int op_cost, op_latency;
  unsigned HOST_WIDE_INT orig_t = t;
  unsigned HOST_WIDE_INT q;
  int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
  int hash_index;
  bool cache_hit = false;
  enum alg_code cache_alg = alg_zero;
  bool speed = optimize_insn_for_speed_p ();

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost.cost = cost_limit->cost + 1;
  alg_out->cost.latency = cost_limit->latency + 1;

  if (cost_limit->cost < 0
      || (cost_limit->cost == 0 && cost_limit->latency <= 0))
    return;

  /* Restrict the bits of "t" to the multiplication's mode.  */
  t &= GET_MODE_MASK (mode);

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost.cost = 0;
      alg_out->cost.latency = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (MULT_COST_LESS (cost_limit, zero_cost[speed]))
	return;
      else
	{
	  alg_out->ops = 1;
	  alg_out->cost.cost = zero_cost[speed];
	  alg_out->cost.latency = zero_cost[speed];
	  alg_out->op[0] = alg_zero;
	  return;
	}
    }

  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = XALLOCA (struct algorithm);
  best_alg = XALLOCA (struct algorithm);
  best_cost = *cost_limit;

  /* Compute the hash index.  */
  hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;

  /* See if we already know what to do for T.  */
  if (alg_hash[hash_index].t == t
      && alg_hash[hash_index].mode == mode
      && alg_hash[hash_index].speed == speed
      && alg_hash[hash_index].alg != alg_unknown)
    {
      cache_alg = alg_hash[hash_index].alg;

      if (cache_alg == alg_impossible)
	{
	  /* The cache tells us that it's impossible to synthesize
	     multiplication by T within alg_hash[hash_index].cost.  */
	  if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
	    /* COST_LIMIT is at least as restrictive as the one
	       recorded in the hash table, in which case we have no
	       hope of synthesizing a multiplication.  Just
	       return.  */
	    return;

	  /* If we get here, COST_LIMIT is less restrictive than the
	     one recorded in the hash table, so we may be able to
	     synthesize a multiplication.  Proceed as if we didn't
	     have the cache entry.  */
	}
      else
	{
	  if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
	    /* The cached algorithm shows that this multiplication
	       requires more cost than COST_LIMIT.  Just return.  This
	       way, we don't clobber this cache entry with
	       alg_impossible but retain useful information.  */
	    return;

	  cache_hit = true;

	  switch (cache_alg)
	    {
	    case alg_shift:
	      goto do_alg_shift;

	    case alg_add_t_m2:
	    case alg_sub_t_m2:
	      goto do_alg_addsub_t_m2;

	    case alg_add_factor:
	    case alg_sub_factor:
	      goto do_alg_addsub_factor;

	    case alg_add_t2_m:
	      goto do_alg_add_t2_m;

	    case alg_sub_t2_m:
	      goto do_alg_sub_t2_m;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */

  if ((t & 1) == 0)
    {
    do_alg_shift:
      m = floor_log2 (t & -t);	/* m = number of low zero bits */
      if (m < maxm)
	{
	  q = t >> m;
	  /* The function expand_shift will choose between a shift and
	     a sequence of additions, so the observed cost is given as
	     MIN (m * add_cost[speed][mode], shift_cost[speed][mode][m]).  */
	  op_cost = m * add_cost[speed][mode];
	  if (shift_cost[speed][mode][m] < op_cost)
	    op_cost = shift_cost[speed][mode][m];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, q, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_shift;
	    }

	  /* See if treating ORIG_T as a signed number yields a better
	     sequence.  Try this sequence only for a negative ORIG_T
	     as it would be useless for a non-negative ORIG_T.  */
	  if ((HOST_WIDE_INT) orig_t < 0)
	    {
	      /* Shift ORIG_T as follows because a right shift of a
		 negative-valued signed type is implementation
		 defined.  */
	      q = ~(~orig_t >> m);
	      /* The function expand_shift will choose between a shift
		 and a sequence of additions, so the observed cost is
		 given as MIN (m * add_cost[speed][mode],
		 shift_cost[speed][mode][m]).  */
	      op_cost = m * add_cost[speed][mode];
	      if (shift_cost[speed][mode][m] < op_cost)
		op_cost = shift_cost[speed][mode][m];
	      new_limit.cost = best_cost.cost - op_cost;
	      new_limit.latency = best_cost.latency - op_cost;
	      synth_mult (alg_in, q, &new_limit, mode);

	      alg_in->cost.cost += op_cost;
	      alg_in->cost.latency += op_cost;
	      if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
		{
		  struct algorithm *x;
		  best_cost = alg_in->cost;
		  x = alg_in, alg_in = best_alg, best_alg = x;
		  best_alg->log[best_alg->ops] = m;
		  best_alg->op[best_alg->ops] = alg_shift;
		}
	    }
	}
      if (cache_hit)
	goto done;
    }

  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

    do_alg_addsub_t_m2:
      for (w = 1; (w & t) != 0; w <<= 1)
	;
      /* If T was -1, then W will be zero after the loop.  This is another
	 case where T ends with ...111.  Handling this with (T + 1) and
	 subtract 1 produces slightly better code and results in algorithm
	 selection much faster than treating it like the ...0111 case
	 below.  */
      if (w == 0
	  || (w > 2
	      /* Reject the case where t is 3.
		 Thus we prefer addition in that case.  */
	      && t != 3))
	{
	  /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */

	  op_cost = add_cost[speed][mode];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t + 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	    }
	}
      else
	{
	  /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */

	  op_cost = add_cost[speed][mode];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t - 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_add_t_m2;
	    }
	}

      /* We may be able to calculate a * -7, a * -15, a * -31, etc
	 quickly with a - a * n for some appropriate constant n.  */
      m = exact_log2 (-orig_t + 1);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftsub1_cost[speed][mode][m];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
		      &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	    }
	}

      if (cache_hit)
	goto done;
    }

  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shift the result by m and add/subtract it to itself.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */

 do_alg_addsub_factor:
  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_add_factor))
	{
	  /* If the target has a cheap shift-and-add instruction use
	     that in preference to a shift insn followed by an add insn.
	     Assume that the shift-and-add is "atomic" with a latency
	     equal to its cost, otherwise assume that on superscalar
	     hardware the shift may be executed concurrently with the
	     earlier steps in the algorithm.  */
	  op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
	  if (shiftadd_cost[speed][mode][m] < op_cost)
	    {
	      op_cost = shiftadd_cost[speed][mode][m];
	      op_latency = op_cost;
	    }
	  else
	    op_latency = add_cost[speed][mode];

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_factor;
	    }
	  /* Other factors will have been taken care of in the recursion.  */
	  break;
	}

      d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_sub_factor))
	{
	  /* If the target has a cheap shift-and-subtract insn use
	     that in preference to a shift insn followed by a sub insn.
	     Assume that the shift-and-sub is "atomic" with a latency
	     equal to its cost, otherwise assume that on superscalar
	     hardware the shift may be executed concurrently with the
	     earlier steps in the algorithm.  */
	  op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
	  if (shiftsub0_cost[speed][mode][m] < op_cost)
	    {
	      op_cost = shiftsub0_cost[speed][mode][m];
	      op_latency = op_cost;
	    }
	  else
	    op_latency = add_cost[speed][mode];

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_factor;
	    }
	  break;
	}
    }
  if (cache_hit)
    goto done;

  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
    do_alg_add_t2_m:
      q = t - 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftadd_cost[speed][mode][m];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_t2_m;
	    }
	}
      if (cache_hit)
	goto done;

    do_alg_sub_t2_m:
      q = t + 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftsub0_cost[speed][mode][m];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t2_m;
	    }
	}
      if (cache_hit)
	goto done;
    }

 done:
  /* If best_cost has not decreased, we have not found any algorithm.  */
  if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
    {
      /* We failed to find an algorithm.  Record alg_impossible for
	 this case (that is, <T, MODE, COST_LIMIT>) so that next time
	 we are asked to find an algorithm for T within the same or
	 lower COST_LIMIT, we can immediately return to the
	 caller.  */
      alg_hash[hash_index].t = t;
      alg_hash[hash_index].mode = mode;
      alg_hash[hash_index].speed = speed;
      alg_hash[hash_index].alg = alg_impossible;
      alg_hash[hash_index].cost = *cost_limit;
      return;
    }

  /* Cache the result.  */
  if (!cache_hit)
    {
      alg_hash[hash_index].t = t;
      alg_hash[hash_index].mode = mode;
      alg_hash[hash_index].speed = speed;
      alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
      alg_hash[hash_index].cost.cost = best_cost.cost;
      alg_hash[hash_index].cost.latency = best_cost.latency;
    }

  /* If we are getting a too long sequence for `struct algorithm'
     to record, make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = best_cost;
  memcpy (alg_out->op, best_alg->op,
	  alg_out->ops * sizeof *alg_out->op);
  memcpy (alg_out->log, best_alg->log,
	  alg_out->ops * sizeof *alg_out->log);
}
/* Find the cheapest way of multiplying a value of mode MODE by VAL.
   Try three variations:

       - a shift/add sequence based on VAL itself
       - a shift/add sequence based on -VAL, followed by a negation
       - a shift/add sequence based on VAL - 1, followed by an addition.

   Return true if the cheapest of these costs less than MULT_COST,
   describing the algorithm in *ALG and final fixup in *VARIANT.  */

static bool
choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
		     struct algorithm *alg, enum mult_variant *variant,
		     int mult_cost)
{
  struct algorithm alg2;
  struct mult_cost limit;
  int op_cost;
  bool speed = optimize_insn_for_speed_p ();

  /* Fail quickly for impossible bounds.  */
  if (mult_cost < 0)
    return false;

  /* Ensure that mult_cost provides a reasonable upper bound.
     Any constant multiplication can be performed with less
     than 2 * bits additions.  */
  op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[speed][mode];
  if (mult_cost > op_cost)
    mult_cost = op_cost;

  *variant = basic_variant;
  limit.cost = mult_cost;
  limit.latency = mult_cost;
  synth_mult (alg, val, &limit, mode);

  /* This works only if the inverted value actually fits in an
     `unsigned int'.  */
  if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
    {
      op_cost = neg_cost[speed][mode];
      if (MULT_COST_LESS (&alg->cost, mult_cost))
	{
	  limit.cost = alg->cost.cost - op_cost;
	  limit.latency = alg->cost.latency - op_cost;
	}
      else
	{
	  limit.cost = mult_cost - op_cost;
	  limit.latency = mult_cost - op_cost;
	}

      synth_mult (&alg2, -val, &limit, mode);
      alg2.cost.cost += op_cost;
      alg2.cost.latency += op_cost;
      if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
	*alg = alg2, *variant = negate_variant;
    }

  /* This proves very useful for division-by-constant.  */
  op_cost = add_cost[speed][mode];
  if (MULT_COST_LESS (&alg->cost, mult_cost))
    {
      limit.cost = alg->cost.cost - op_cost;
      limit.latency = alg->cost.latency - op_cost;
    }
  else
    {
      limit.cost = mult_cost - op_cost;
      limit.latency = mult_cost - op_cost;
    }

  synth_mult (&alg2, val - 1, &limit, mode);
  alg2.cost.cost += op_cost;
  alg2.cost.latency += op_cost;
  if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
    *alg = alg2, *variant = add_variant;

  return MULT_COST_LESS (&alg->cost, mult_cost);
}
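
/* Worked example (editor's addition): for VAL == -7 the negate variant
   can win: synth_mult on 7 finds (x << 3) - x and the fixup negates the
   result.  For VAL == 9 the add variant can reduce the problem to the
   power of two 8 -- a single shift -- followed by the final addition of
   the multiplicand.  */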
/* A subroutine of expand_mult, used for constant multiplications.
   Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
   convenient.  Use the shift/add sequence described by ALG and apply
   the final fixup specified by VARIANT.  */

static rtx
expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
		   rtx target, const struct algorithm *alg,
		   enum mult_variant variant)
{
  HOST_WIDE_INT val_so_far;
  rtx insn, accum, tem;
  int opno;
  enum machine_mode nmode;

  /* Avoid referencing memory over and over and invalid sharing
     on SUBREGs.  */
  op0 = force_reg (mode, op0);

  /* ACCUM starts out either as OP0 or as a zero, depending on
     the first operation.  */

  if (alg->op[0] == alg_zero)
    {
      accum = copy_to_mode_reg (mode, const0_rtx);
      val_so_far = 0;
    }
  else if (alg->op[0] == alg_m)
    {
      accum = copy_to_mode_reg (mode, op0);
      val_so_far = 1;
    }
  else
    gcc_unreachable ();

  for (opno = 1; opno < alg->ops; opno++)
    {
      int log = alg->log[opno];
      rtx shift_subtarget = optimize ? 0 : accum;
      rtx add_target
	= (opno == alg->ops - 1 && target != 0 && variant != add_variant
	   && !optimize) ? target : 0;
      rtx accum_target = optimize ? 0 : accum;

      switch (alg->op[opno])
	{
	case alg_shift:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				build_int_cst (NULL_TREE, log),
				NULL_RTX, 0);
	  val_so_far <<= log;
	  break;

	case alg_add_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0,
			      build_int_cst (NULL_TREE, log),
			      NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_sub_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0,
			      build_int_cst (NULL_TREE, log),
			      NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far -= (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_add_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				build_int_cst (NULL_TREE, log),
				shift_subtarget, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) + 1;
	  break;

	case alg_sub_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				build_int_cst (NULL_TREE, log),
				shift_subtarget, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) - 1;
	  break;

	case alg_add_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum,
			      build_int_cst (NULL_TREE, log),
			      NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += val_so_far << log;
	  break;

	case alg_sub_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum,
			      build_int_cst (NULL_TREE, log),
			      NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
				 (add_target
				  ? add_target : (optimize ? 0 : tem)));
	  val_so_far = (val_so_far << log) - val_so_far;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Write a REG_EQUAL note on the last insn so that we can cse
	 multiplication sequences.  Note that if ACCUM is a SUBREG,
	 we've set the inner register and must properly indicate
	 that.  */

      tem = op0, nmode = mode;
      if (GET_CODE (accum) == SUBREG)
	{
	  nmode = GET_MODE (SUBREG_REG (accum));
	  tem = gen_lowpart (nmode, op0);
	}

      insn = get_last_insn ();
      set_unique_reg_note (insn, REG_EQUAL,
			   gen_rtx_MULT (nmode, tem,
					 GEN_INT (val_so_far)));
    }

  if (variant == negate_variant)
    {
      val_so_far = -val_so_far;
      accum = expand_unop (mode, neg_optab, accum, target, 0);
    }
  else if (variant == add_variant)
    {
      val_so_far = val_so_far + 1;
      accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
    }

  /* Compare only the bits of val and val_so_far that are significant
     in the result mode, to avoid sign-/zero-extension confusion.  */
  val &= GET_MODE_MASK (mode);
  val_so_far &= GET_MODE_MASK (mode);
  gcc_assert (val == val_so_far);

  return accum;
}
/* Perform a multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).

   We check specially for a constant integer as OP1.
   If you want this check for OP0 as well, then before calling
   you should swap the two operands if OP0 would be constant.  */

rtx
expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
	     int unsignedp)
{
  enum mult_variant variant;
  struct algorithm algorithm;
  int max_cost;
  bool speed = optimize_insn_for_speed_p ();

  /* Handling const0_rtx here allows us to use zero as a rogue value for
     coeff below.  */
  if (op1 == const0_rtx)
    return const0_rtx;
  if (op1 == const1_rtx)
    return op0;
  if (op1 == constm1_rtx)
    return expand_unop (mode,
			GET_MODE_CLASS (mode) == MODE_INT
			&& !unsignedp && flag_trapv
			? negv_optab : neg_optab,
			op0, target, 0);

  /* These are the operations that are potentially turned into a sequence
     of shifts and additions.  */
  if (SCALAR_INT_MODE_P (mode)
      && (unsignedp || !flag_trapv))
    {
      HOST_WIDE_INT coeff = 0;
      rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);

      /* synth_mult does an `unsigned int' multiply.  As long as the mode is
	 less than or equal in size to `unsigned int' this doesn't matter.
	 If the mode is larger than `unsigned int', then synth_mult works
	 only if the constant value exactly fits in an `unsigned int' without
	 any truncation.  This means that multiplying by negative values does
	 not work; results are off by 2^32 on a 32 bit machine.  */

      if (CONST_INT_P (op1))
	{
	  /* Attempt to handle multiplication of DImode values by negative
	     coefficients, by performing the multiplication by a positive
	     multiplier and then inverting the result.  */
	  if (INTVAL (op1) < 0
	      && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
	    {
	      /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
		 result is interpreted as an unsigned coefficient.
		 Exclude cost of op0 from max_cost to match the cost
		 calculation of the synth_mult.  */
	      max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed)
			 - neg_cost[speed][mode];
	      if (max_cost > 0
		  && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
					  &variant, max_cost))
		{
		  rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
						NULL_RTX, &algorithm,
						variant);
		  return expand_unop (mode, neg_optab, temp, target, 0);
		}
	    }
	  else coeff = INTVAL (op1);
	}
      else if (GET_CODE (op1) == CONST_DOUBLE)
	{
	  /* If we are multiplying in DImode, it may still be a win
	     to try to work with shifts and adds.  */
	  if (CONST_DOUBLE_HIGH (op1) == 0
	      && CONST_DOUBLE_LOW (op1) > 0)
	    coeff = CONST_DOUBLE_LOW (op1);
	  else if (CONST_DOUBLE_LOW (op1) == 0
		   && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
	    {
	      int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
			  + HOST_BITS_PER_WIDE_INT;
	      return expand_shift (LSHIFT_EXPR, mode, op0,
				   build_int_cst (NULL_TREE, shift),
				   target, unsignedp);
	    }
	}

      /* We used to test optimize here, on the grounds that it's better to
	 produce a smaller program when -O is not used.  But this causes
	 such a terrible slowdown sometimes that it seems better to always
	 use synth_mult.  */
      if (coeff != 0)
	{
	  /* Special case powers of two.  */
	  if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
	    return expand_shift (LSHIFT_EXPR, mode, op0,
				 build_int_cst (NULL_TREE, floor_log2 (coeff)),
				 target, unsignedp);

	  /* Exclude cost of op0 from max_cost to match the cost
	     calculation of the synth_mult.  */
	  max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed);
	  if (choose_mult_variant (mode, coeff, &algorithm, &variant,
				   max_cost))
	    return expand_mult_const (mode, op0, coeff, target,
				      &algorithm, variant);
	}
    }

  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      rtx temp = op0;
      op0 = op1;
      op1 = temp;
    }

  /* Expand x*2.0 as x+x.  */
  if (GET_CODE (op1) == CONST_DOUBLE
      && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);

      if (REAL_VALUES_EQUAL (d, dconst2))
	{
	  op0 = force_reg (GET_MODE (op0), op0);
	  return expand_binop (mode, add_optab, op0, op0,
			       target, unsignedp, OPTAB_LIB_WIDEN);
	}
    }

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode,
		      ! unsignedp
		      && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
		      ? smulv_optab : smul_optab,
		      op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
  gcc_assert (op0);
  return op0;
}
/* Return the smallest n such that 2**n >= X.  */

int
ceil_log2 (unsigned HOST_WIDE_INT x)
{
  return floor_log2 (x - 1) + 1;
}
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose
   the multiplier more freely.

   The rounded-up logarithm of D is placed in *lgup_ptr.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */

static
unsigned HOST_WIDE_INT
choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
		   rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
{
  HOST_WIDE_INT mhigh_hi, mlow_hi;
  unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
  int lgup, post_shift;
  int pow, pow2;
  unsigned HOST_WIDE_INT nl, dummy1;
  HOST_WIDE_INT nh, dummy2;

  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  gcc_assert (lgup <= n);

  pow = n + lgup;
  pow2 = n + lgup - precision;

  /* We could handle this with some effort, but this case is much
     better handled directly with a scc insn, so rely on caller using
     that.  */
  gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);

  /* mlow = 2^(N + lgup)/d */
  if (pow >= HOST_BITS_PER_WIDE_INT)
    {
      nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
      nl = 0;
    }
  else
    {
      nh = 0;
      nl = (unsigned HOST_WIDE_INT) 1 << pow;
    }
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
			&mlow_lo, &mlow_hi, &dummy1, &dummy2);

  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
  if (pow2 >= HOST_BITS_PER_WIDE_INT)
    nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
  else
    nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
			&mhigh_lo, &mhigh_hi, &dummy1, &dummy2);

  gcc_assert (!mhigh_hi || nh - d < d);
  gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
  /* Assert that mlow < mhigh.  */
  gcc_assert (mlow_hi < mhigh_hi
	      || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms.  */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      unsigned HOST_WIDE_INT ml_lo
	= (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
      unsigned HOST_WIDE_INT mh_lo
	= (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
      if (ml_lo >= mh_lo)
	break;

      mlow_hi = 0;
      mlow_lo = ml_lo;
      mhigh_hi = 0;
      mhigh_lo = mh_lo;
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
      *multiplier_ptr = GEN_INT (mhigh_lo & mask);
      return mhigh_lo >= mask;
    }
  else
    {
      *multiplier_ptr = GEN_INT (mhigh_lo);
      return mhigh_hi;
    }
}
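
/* Worked example (editor's addition): for N == 32, D == 7 and full
   PRECISION this computes the 33-bit multiplier m == 2^32 + 0x24924925
   with *POST_SHIFT_PTR == 3, so x/7 == (x * m) >> (32 + 3) when the
   product is formed in full precision; the set most significant bit
   returned here is what tells the caller to emit the extra
   add-and-shift fixup.  */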
/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */

static unsigned HOST_WIDE_INT
invert_mod2n (unsigned HOST_WIDE_INT x, int n)
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;
  int nbit = 3;

  mask = (n == HOST_BITS_PER_WIDE_INT
	  ? ~(unsigned HOST_WIDE_INT) 0
	  : ((unsigned HOST_WIDE_INT) 1 << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;		/* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
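
/* Worked example (editor's addition): for x == 7 and n == 8 the seed
   y = 7 already satisfies 7*7 == 49 == 1 (mod 2^3); one iteration gives
   y = 7 * (2 - 49) & 0xff == 0xb7, and 7 * 0xb7 == 0x501 == 1 (mod 2^8).
   Each iteration doubles the number of correct low-order bits.  */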
/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
   flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.

   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */

static rtx
expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
			     rtx op1, rtx target, int unsignedp)
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
		      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
		      NULL_RTX, 0);
  tem = expand_and (mode, tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
		     adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
		      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
		      NULL_RTX, 0);
  tem = expand_and (mode, tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
			  target);

  return target;
}
/* Subroutine of expand_mult_highpart.  Return the MODE high part of OP.  */

static rtx
extract_high_half (enum machine_mode mode, rtx op)
{
  enum machine_mode wider_mode;

  if (mode == word_mode)
    return gen_highpart (mode, op);

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  op = expand_shift (RSHIFT_EXPR, wider_mode, op,
		     build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
  return convert_modes (mode, wider_mode, op, 0);
}
/* Like expand_mult_highpart, but only consider using a multiplication
   optab.  OP1 is an rtx for the constant operand.  */

static rtx
expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
			    rtx target, int unsignedp, int max_cost)
{
  rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
  enum machine_mode wider_mode;
  optab moptab;
  rtx tem;
  int size;
  bool speed = optimize_insn_for_speed_p ();

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  size = GET_MODE_BITSIZE (mode);

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost[speed][mode] < max_cost)
    {
      moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	return tem;
    }

  /* Secondly, same as above, but use sign flavor opposite of unsignedp.
     Need to adjust the result after the multiplication.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost[speed][mode] + 2 * shift_cost[speed][mode][size-1]
	  + 4 * add_cost[speed][mode] < max_cost))
    {
      moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	/* We used the wrong signedness.  Adjust the result.  */
	return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					    tem, unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (optab_handler (moptab, wider_mode)->insn_code != CODE_FOR_nothing
      && mul_widen_cost[speed][wider_mode] < max_cost)
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
			  unsignedp, OPTAB_WIDEN);
      if (tem)
	return extract_high_half (mode, tem);
    }

  /* Try widening the mode and perform a non-widening multiplication.  */
  if (optab_handler (smul_optab, wider_mode)->insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && mul_cost[speed][wider_mode] + shift_cost[speed][mode][size-1] < max_cost)
    {
      rtx insns, wop0, wop1;

      /* We need to widen the operands, for example to ensure the
	 constant multiplier is correctly sign or zero extended.
	 Use a sequence to clean-up any instructions emitted by
	 the conversions if things don't work out.  */
      start_sequence ();
      wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
      wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
      tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
			  unsignedp, OPTAB_WIDEN);
      insns = get_insns ();
      end_sequence ();

      if (tem)
	{
	  emit_insn (insns);
	  return extract_high_half (mode, tem);
	}
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (optab_handler (moptab, wider_mode)->insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost[speed][wider_mode] + 2 * shift_cost[speed][mode][size-1]
	  + 4 * add_cost[speed][mode] < max_cost))
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
			  NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
	{
	  tem = extract_high_half (mode, tem);
	  /* We used the wrong signedness.  Adjust the result.  */
	  return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					      target, unsignedp);
	}
    }

  return 0;
}
3550 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3551 putting the high half of the result in TARGET if that is convenient,
3552 and return where the result is. If the operation can not be performed,
3555 MODE is the mode of operation and result.
3557 UNSIGNEDP nonzero means unsigned multiply.
3559 MAX_COST is the total allowed cost for the expanded RTL. */
3562 expand_mult_highpart (enum machine_mode mode
, rtx op0
, rtx op1
,
3563 rtx target
, int unsignedp
, int max_cost
)
3565 enum machine_mode wider_mode
= GET_MODE_WIDER_MODE (mode
);
3566 unsigned HOST_WIDE_INT cnst1
;
3568 bool sign_adjust
= false;
3569 enum mult_variant variant
;
3570 struct algorithm alg
;
3572 bool speed
= optimize_insn_for_speed_p ();
3574 gcc_assert (!SCALAR_FLOAT_MODE_P (mode
));
3575 /* We can't support modes wider than HOST_BITS_PER_INT. */
3576 gcc_assert (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
);
3578 cnst1
= INTVAL (op1
) & GET_MODE_MASK (mode
);
3580 /* We can't optimize modes wider than BITS_PER_WORD.
3581 ??? We might be able to perform double-word arithmetic if
3582 mode == word_mode, however all the cost calculations in
3583 synth_mult etc. assume single-word operations. */
3584 if (GET_MODE_BITSIZE (wider_mode
) > BITS_PER_WORD
)
3585 return expand_mult_highpart_optab (mode
, op0
, op1
, target
,
3586 unsignedp
, max_cost
);
3588 extra_cost
= shift_cost
[speed
][mode
][GET_MODE_BITSIZE (mode
) - 1];
3590 /* Check whether we try to multiply by a negative constant. */
3591 if (!unsignedp
&& ((cnst1
>> (GET_MODE_BITSIZE (mode
) - 1)) & 1))
3594 extra_cost
+= add_cost
[speed
][mode
];
3597 /* See whether shift/add multiplication is cheap enough. */
3598 if (choose_mult_variant (wider_mode
, cnst1
, &alg
, &variant
,
3599 max_cost
- extra_cost
))
3601 /* See whether the specialized multiplication optabs are
3602 cheaper than the shift/add version. */
3603 tem
= expand_mult_highpart_optab (mode
, op0
, op1
, target
, unsignedp
,
3604 alg
.cost
.cost
+ extra_cost
);
3608 tem
= convert_to_mode (wider_mode
, op0
, unsignedp
);
3609 tem
= expand_mult_const (wider_mode
, tem
, cnst1
, 0, &alg
, variant
);
3610 tem
= extract_high_half (mode
, tem
);
3612 /* Adjust result for signedness. */
3614 tem
= force_operand (gen_rtx_MINUS (mode
, tem
, op0
), tem
);
3618 return expand_mult_highpart_optab (mode
, op0
, op1
, target
,
3619 unsignedp
, max_cost
);
/* Expand signed modulus of OP0 by a power of two D in mode MODE.  */

static rtx
expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  unsigned HOST_WIDE_INT masklow, maskhigh;
  rtx result, temp, shift, label;
  int logd;

  logd = floor_log2 (d);
  result = gen_reg_rtx (mode);

  /* Avoid conditional branches when they're expensive.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
      && optimize_insn_for_speed_p ())
    {
      rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
				      mode, 0, -1);
      if (signmask)
	{
	  signmask = force_reg (mode, signmask);
	  masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
	  shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);

	  /* Use the rtx_cost of a LSHIFTRT instruction to determine
	     which instruction sequence to use.  If logical right shifts
	     are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
	     use a LSHIFTRT, 1 ADD, 1 SUB and an AND.  */

	  temp = gen_rtx_LSHIFTRT (mode, result, shift);
	  if (optab_handler (lshr_optab, mode)->insn_code == CODE_FOR_nothing
	      || (rtx_cost (temp, SET, optimize_insn_for_speed_p ())
		  > COSTS_N_INSNS (2)))
	    {
	      temp = expand_binop (mode, xor_optab, op0, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, xor_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	    }
	  else
	    {
	      signmask = expand_binop (mode, lshr_optab, signmask, shift,
				       NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      signmask = force_reg (mode, signmask);

	      temp = expand_binop (mode, add_optab, op0, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	    }

	  if (temp != result)
	    emit_move_insn (result, temp);
	  return result;
	}
    }

  /* Mask contains the mode's signbit and the significant bits of the
     modulus.  By including the signbit in the operation, many targets
     can avoid an explicit compare operation in the following comparison
     against zero.  */

  masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
  if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
      maskhigh = -1;
    }
  else
    maskhigh = (HOST_WIDE_INT) -1
		 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);

  temp = expand_binop (mode, and_optab, op0,
		       immed_double_const (masklow, maskhigh, mode),
		       result, 1, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  label = gen_label_rtx ();
  do_cmp_and_jump (result, const0_rtx, GE, mode, label);

  temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
		       0, OPTAB_LIB_WIDEN);
  masklow = (HOST_WIDE_INT) -1 << logd;
  maskhigh = -1;
  temp = expand_binop (mode, ior_optab, temp,
		       immed_double_const (masklow, maskhigh, mode),
		       result, 1, OPTAB_LIB_WIDEN);
  temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
		       0, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  emit_label (label);
  return result;
}
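/* Illustrative sketch (not part of this file): the branch-free sequence
   built above, written out for a 32-bit value and d = 1 << logd.  The
   stdint names are only for the example.

     #include <stdint.h>

     static int32_t
     smod_pow2 (int32_t x, int logd)
     {
       uint32_t ux = (uint32_t) x;
       uint32_t mask = ((uint32_t) 1 << logd) - 1;
       uint32_t sign = x < 0 ? 0xffffffffu : 0u;   // the signmask above
       // Bias toward |x|, mask, then undo the bias again.
       return (int32_t) (((((ux ^ sign) - sign) & mask) ^ sign) - sign);
     }

   E.g. smod_pow2 (-7, 2) == -3, matching C's truncating %.  */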
/* Expand signed division of OP0 by a power of two D in mode MODE.
   This routine is only called for positive values of D.  */

static rtx
expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx temp, label;
  tree shift;
  int logd;

  logd = floor_log2 (d);
  shift = build_int_cst (NULL_TREE, logd);

  if (d == 2
      && BRANCH_COST (optimize_insn_for_speed_p (),
		      false) >= 1)
    {
      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
			   0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
    }

#ifdef HAVE_conditional_move
  if (BRANCH_COST (optimize_insn_for_speed_p (), false)
      >= 2)
    {
      rtx temp2;

      /* ??? emit_conditional_move forces a stack adjustment via
	 compare_from_rtx so, if the sequence is discarded, it will
	 be lost.  Do it now instead.  */
      do_pending_stack_adjust ();

      start_sequence ();
      temp2 = copy_to_mode_reg (mode, op0);
      temp = expand_binop (mode, add_optab, temp2, GEN_INT (d - 1),
			   NULL_RTX, 0, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);

      /* Construct "temp2 = (temp2 < 0) ? temp : temp2".  */
      temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
				     mode, temp, temp2, mode, 0);
      if (temp2)
	{
	  rtx seq = get_insns ();
	  end_sequence ();
	  emit_insn (seq);
	  return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
	}
      end_sequence ();
    }
#endif

  if (BRANCH_COST (optimize_insn_for_speed_p (),
		   false) >= 2)
    {
      int ushift = GET_MODE_BITSIZE (mode) - logd;

      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
      if (shift_cost[optimize_insn_for_speed_p ()][mode][ushift]
	  > COSTS_N_INSNS (1))
	temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
			     NULL_RTX, 0, OPTAB_LIB_WIDEN);
      else
	temp = expand_shift (RSHIFT_EXPR, mode, temp,
			     build_int_cst (NULL_TREE, ushift),
			     NULL_RTX, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
			   0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
    }

  label = gen_label_rtx ();
  temp = copy_to_mode_reg (mode, op0);
  do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
  expand_inc (temp, GEN_INT (d - 1));
  emit_label (label);
  return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
}
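/* Illustrative sketch (not part of this file): what the sequences above
   compute, for 32-bit X and d = 1 << logd, in plain C.

     #include <stdint.h>

     static int32_t
     sdiv_pow2 (int32_t x, int logd)
     {
       int32_t d = (int32_t) 1 << logd;
       // Bias a negative dividend by d - 1 so the arithmetic right
       // shift rounds toward zero instead of toward minus infinity.
       int32_t bias = x < 0 ? d - 1 : 0;
       return (x + bias) >> logd;   // assumes arithmetic right shift
     }

   E.g. sdiv_pow2 (-7, 2) == -1, matching C's truncating /.  */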
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   left for later.  */

/* ??? For modulo, we don't actually need the highpart of the first product,
   the low part will do nicely.  And for small divisors, the second multiply
   can also be a low-part only multiply or even be completely left out.
   E.g. to calculate the remainder of a division by 3 with a 32 bit
   multiply, multiply with 0x55555556 and extract the upper two bits;
   the result is exact for inputs up to 0x1fffffff.
   The input range can be reduced by using cross-sum rules.
   For odd divisors >= 3, the following table gives right shift counts
   so that if a number is shifted by an integer multiple of the given
   amount, the remainder stays the same:
   2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
   14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
   0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
   20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
   0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12

   Cross-sum rules for even numbers can be derived by leaving as many bits
   to the right alone as the divisor has zeros to the right.
   E.g. if x is an unsigned 32 bit number:
   (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
   */
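/* Illustrative sketch (not part of this file): the remainder-by-3 trick
   mentioned above, as a checkable C function.

     #include <stdint.h>

     static uint32_t
     umod3 (uint32_t n)   // exact for n <= 0x1fffffff, per the note above
     {
       // 0x55555556 ~= 2**32 / 3; the top two bits of the low product
       // word carry the remainder.
       return (n * 0x55555556u) >> 30;
     }  */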
rtx
expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
	       rtx op0, rtx op1, rtx target, int unsignedp)
{
  enum machine_mode compute_mode;
  rtx tquotient;
  rtx quotient = 0, remainder = 0;
  rtx last;
  int size;
  rtx insn, set;
  optab optab1, optab2;
  int op1_is_constant, op1_is_pow2 = 0;
  int max_cost, extra_cost;
  static HOST_WIDE_INT last_div_const = 0;
  static HOST_WIDE_INT ext_op1;
  bool speed = optimize_insn_for_speed_p ();

  op1_is_constant = CONST_INT_P (op1);
  if (op1_is_constant)
    {
      ext_op1 = INTVAL (op1);
      if (unsignedp)
	ext_op1 &= GET_MODE_MASK (mode);
      op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
		      || (! unsignedp
			  && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
    }
  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding mode.
     For some special operands this code emits all RTL for the desired
     operation, for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
     by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expand_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */
  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;

  /* When dividing by -1, we could get an overflow.
     negv_optab can handle overflows.  */
  if (! unsignedp && op1 == constm1_rtx)
    {
      if (rem_flag)
	return const0_rtx;
      return expand_unop (mode,
			  flag_trapv && GET_MODE_CLASS (mode) == MODE_INT
			  ? negv_optab : neg_optab, op0, target, 0);
    }

  if (target
      /* Don't use the function value register as a target
	 since we have to read it as well as write it,
	 and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
	  /* Don't clobber an operand while doing a multi-step calculation.  */
	  || ((rem_flag || op1_is_constant)
	      && (reg_mentioned_p (target, op0)
		  || (MEM_P (op0) && MEM_P (target))))
	  || reg_mentioned_p (target, op1)
	  || (MEM_P (op1) && MEM_P (target))))
    target = 0;
  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions that modxx3 insns do, and that these insns don't
     normally fail.  If these assumptions are not correct, we may
     generate less efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expand_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */
  optab1 = ((op1_is_pow2 && op1 != const0_rtx)
	    ? (unsignedp ? lshr_optab : ashr_optab)
	    : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = ((op1_is_pow2 && op1 != const0_rtx)
	    ? optab1
	    : (unsignedp ? udivmod_optab : sdivmod_optab));

  for (compute_mode = mode; compute_mode != VOIDmode;
       compute_mode = GET_MODE_WIDER_MODE (compute_mode))
    if (optab_handler (optab1, compute_mode)->insn_code != CODE_FOR_nothing
	|| optab_handler (optab2, compute_mode)->insn_code != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    for (compute_mode = mode; compute_mode != VOIDmode;
	 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
      if (optab_libfunc (optab1, compute_mode)
	  || optab_libfunc (optab2, compute_mode))
	break;

  /* If we still couldn't find a mode, use MODE, but expand_binop will
     probably die.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;
  if (target && GET_MODE (target) == compute_mode)
    tquotient = target;
  else
    tquotient = gen_reg_rtx (compute_mode);

  size = GET_MODE_BITSIZE (compute_mode);
#if 0
  /* It should be possible to restrict the precision to GET_MODE_BITSIZE
     (mode), and thereby get better code when OP1 is a constant.  Do that
     later.  It will require going over all usages of SIZE below.  */
  size = GET_MODE_BITSIZE (mode);
#endif
  /* Only deduct something for a REM if the last divide done was
     for a different constant.  Then set the constant of the last
     divide.  */
  max_cost = (unsignedp
	      ? udiv_cost[speed][compute_mode]
	      : sdiv_cost[speed][compute_mode]);
  if (rem_flag && ! (last_div_const != 0 && op1_is_constant
		     && INTVAL (op1) == last_div_const))
    max_cost -= (mul_cost[speed][compute_mode]
		 + add_cost[speed][compute_mode]);

  last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
  /* Now convert to the best mode to use.  */
  if (compute_mode != mode)
    {
      op0 = convert_modes (compute_mode, mode, op0, unsignedp);
      op1 = convert_modes (compute_mode, mode, op1, unsignedp);

      /* convert_modes may have placed op1 into a register, so we
	 must recompute the following.  */
      op1_is_constant = CONST_INT_P (op1);
      op1_is_pow2 = (op1_is_constant
		     && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
			  || (! unsignedp
			      && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
    }

  /* If one of the operands is a volatile MEM, copy it into a register.  */

  if (MEM_P (op0) && MEM_VOLATILE_P (op0))
    op0 = force_reg (compute_mode, op0);
  if (MEM_P (op1) && MEM_VOLATILE_P (op1))
    op1 = force_reg (compute_mode, op1);

  /* If we need the remainder or if OP1 is constant, we need to
     put OP0 in a register in case it has any queued subexpressions.  */
  if (rem_flag || op1_is_constant)
    op0 = force_reg (compute_mode, op0);
  last = get_last_insn ();

  /* Promote floor rounding to trunc rounding for unsigned operations.  */
  if (unsignedp)
    {
      if (code == FLOOR_DIV_EXPR)
	code = TRUNC_DIV_EXPR;
      if (code == FLOOR_MOD_EXPR)
	code = TRUNC_MOD_EXPR;
      if (code == EXACT_DIV_EXPR && op1_is_pow2)
	code = TRUNC_DIV_EXPR;
    }

  if (op1 != const0_rtx)
    switch (code)
      {
      case TRUNC_MOD_EXPR:
      case TRUNC_DIV_EXPR:
	if (op1_is_constant)
	  {
	    if (unsignedp)
	      {
		unsigned HOST_WIDE_INT mh;
		int pre_shift, post_shift;
		int dummy;
		rtx ml;
		unsigned HOST_WIDE_INT d = (INTVAL (op1)
					    & GET_MODE_MASK (compute_mode));

		if (EXACT_POWER_OF_2_OR_ZERO_P (d))
		  {
		    pre_shift = floor_log2 (d);
		    if (rem_flag)
		      {
			remainder
			  = expand_binop (compute_mode, and_optab, op0,
					  GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
					  remainder, 1, OPTAB_LIB_WIDEN);
			if (remainder)
			  return gen_lowpart (mode, remainder);
		      }
		    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
					     build_int_cst (NULL_TREE,
							    pre_shift),
					     tquotient, 1);
		  }
		else if (size <= HOST_BITS_PER_WIDE_INT)
		  {
		    if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
		      {
			/* Most significant bit of divisor is set; emit an scc
			   insn.  */
			quotient = emit_store_flag_force (tquotient, GEU,
							  op0, op1,
							  compute_mode, 1, 1);
		      }
		    else
		      {
			/* Find a suitable multiplier and right shift count
			   instead of multiplying with D.  */

			mh = choose_multiplier (d, size, size,
						&ml, &post_shift, &dummy);

			/* If the suggested multiplier is more than SIZE bits,
			   we can do better for even divisors, using an
			   initial right shift.  */
			if (mh != 0 && (d & 1) == 0)
			  {
			    pre_shift = floor_log2 (d & -d);
			    mh = choose_multiplier (d >> pre_shift, size,
						    size - pre_shift,
						    &ml, &post_shift, &dummy);
			    gcc_assert (!mh);
			  }
			else
			  pre_shift = 0;

			if (mh != 0)
			  {
			    rtx t1, t2, t3, t4;

			    if (post_shift - 1 >= BITS_PER_WORD)
			      goto fail1;

			    extra_cost
			      = (shift_cost[speed][compute_mode][post_shift - 1]
				 + shift_cost[speed][compute_mode][1]
				 + 2 * add_cost[speed][compute_mode]);
			    t1 = expand_mult_highpart (compute_mode, op0, ml,
						       NULL_RTX, 1,
						       max_cost - extra_cost);
			    if (t1 == 0)
			      goto fail1;
			    t2 = force_operand (gen_rtx_MINUS (compute_mode,
							       op0, t1),
						NULL_RTX);
			    t3 = expand_shift
			      (RSHIFT_EXPR, compute_mode, t2,
			       build_int_cst (NULL_TREE, 1),
			       NULL_RTX, 1);
			    t4 = force_operand (gen_rtx_PLUS (compute_mode,
							      t1, t3),
						NULL_RTX);
			    quotient = expand_shift
			      (RSHIFT_EXPR, compute_mode, t4,
			       build_int_cst (NULL_TREE, post_shift - 1),
			       tquotient, 1);
			  }
			else
			  {
			    rtx t1, t2;

			    if (pre_shift >= BITS_PER_WORD
				|| post_shift >= BITS_PER_WORD)
			      goto fail1;

			    t1 = expand_shift
			      (RSHIFT_EXPR, compute_mode, op0,
			       build_int_cst (NULL_TREE, pre_shift),
			       NULL_RTX, 1);
			    extra_cost
			      = (shift_cost[speed][compute_mode][pre_shift]
				 + shift_cost[speed][compute_mode][post_shift]);
			    t2 = expand_mult_highpart (compute_mode, t1, ml,
						       NULL_RTX, 1,
						       max_cost - extra_cost);
			    if (t2 == 0)
			      goto fail1;
			    quotient = expand_shift
			      (RSHIFT_EXPR, compute_mode, t2,
			       build_int_cst (NULL_TREE, post_shift),
			       tquotient, 1);
			  }
		      }
		  }
		else		/* Too wide mode to use tricky code */
		  break;

		insn = get_last_insn ();
		if (insn != last
		    && (set = single_set (insn)) != 0
		    && SET_DEST (set) == quotient)
		  set_unique_reg_note (insn,
				       REG_EQUAL,
				       gen_rtx_UDIV (compute_mode, op0, op1));
	      }
	    else		/* TRUNC_DIV, signed */
	      {
		unsigned HOST_WIDE_INT ml;
		int lgup, post_shift;
		rtx mlr;
		HOST_WIDE_INT d = INTVAL (op1);
		unsigned HOST_WIDE_INT abs_d;

		/* Since d might be INT_MIN, we have to cast to
		   unsigned HOST_WIDE_INT before negating to avoid
		   undefined signed overflow.  */
		abs_d = (d >= 0
			 ? (unsigned HOST_WIDE_INT) d
			 : - (unsigned HOST_WIDE_INT) d);

		/* n rem d = n rem -d */
		if (rem_flag && d < 0)
		  {
		    d = abs_d;
		    op1 = gen_int_mode (abs_d, compute_mode);
		  }

		if (d == 1)
		  quotient = op0;
		else if (d == -1)
		  quotient = expand_unop (compute_mode, neg_optab, op0,
					  tquotient, 0);
		else if (HOST_BITS_PER_WIDE_INT >= size
			 && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
		  {
		    /* This case is not handled correctly below.  */
		    quotient = emit_store_flag (tquotient, EQ, op0, op1,
						compute_mode, 1, 1);
		    if (quotient == 0)
		      goto fail1;
		  }
		else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
			 && (rem_flag ? smod_pow2_cheap[speed][compute_mode]
				      : sdiv_pow2_cheap[speed][compute_mode])
			 /* We assume that cheap metric is true if the
			    optab has an expander for this mode.  */
			 && ((optab_handler ((rem_flag ? smod_optab
					      : sdiv_optab),
					     compute_mode)->insn_code
			      != CODE_FOR_nothing)
			     || (optab_handler (sdivmod_optab,
						compute_mode)
				 ->insn_code != CODE_FOR_nothing)))
		  ;
		else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
		  {
		    if (rem_flag)
		      {
			remainder = expand_smod_pow2 (compute_mode, op0, d);
			if (remainder)
			  return gen_lowpart (mode, remainder);
		      }

		    if (sdiv_pow2_cheap[speed][compute_mode]
			&& ((optab_handler (sdiv_optab, compute_mode)->insn_code
			     != CODE_FOR_nothing)
			    || (optab_handler (sdivmod_optab, compute_mode)->insn_code
				!= CODE_FOR_nothing)))
		      quotient = expand_divmod (0, TRUNC_DIV_EXPR,
						compute_mode, op0,
						gen_int_mode (abs_d,
							      compute_mode),
						NULL_RTX, 0);
		    else
		      quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);

		    /* We have computed OP0 / abs(OP1).  If OP1 is negative,
		       negate the quotient.  */
		    if (d < 0)
		      {
			insn = get_last_insn ();
			if (insn != last
			    && (set = single_set (insn)) != 0
			    && SET_DEST (set) == quotient
			    && abs_d < ((unsigned HOST_WIDE_INT) 1
					<< (HOST_BITS_PER_WIDE_INT - 1)))
			  set_unique_reg_note (insn,
					       REG_EQUAL,
					       gen_rtx_DIV (compute_mode,
							    op0,
							    gen_int_mode
							    (abs_d,
							     compute_mode)));

			quotient = expand_unop (compute_mode, neg_optab,
						quotient, quotient, 0);
		      }
		  }
		else if (size <= HOST_BITS_PER_WIDE_INT)
		  {
		    choose_multiplier (abs_d, size, size - 1,
				       &mlr, &post_shift, &lgup);
		    ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
		    if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
		      {
			rtx t1, t2, t3;

			if (post_shift >= BITS_PER_WORD
			    || size - 1 >= BITS_PER_WORD)
			  goto fail1;

			extra_cost = (shift_cost[speed][compute_mode][post_shift]
				      + shift_cost[speed][compute_mode][size - 1]
				      + add_cost[speed][compute_mode]);
			t1 = expand_mult_highpart (compute_mode, op0, mlr,
						   NULL_RTX, 0,
						   max_cost - extra_cost);
			if (t1 == 0)
			  goto fail1;
			t2 = expand_shift
			  (RSHIFT_EXPR, compute_mode, t1,
			   build_int_cst (NULL_TREE, post_shift),
			   NULL_RTX, 0);
			t3 = expand_shift
			  (RSHIFT_EXPR, compute_mode, op0,
			   build_int_cst (NULL_TREE, size - 1),
			   NULL_RTX, 0);
			if (d < 0)
			  quotient
			    = force_operand (gen_rtx_MINUS (compute_mode,
							    t3, t2),
					     tquotient);
			else
			  quotient
			    = force_operand (gen_rtx_MINUS (compute_mode,
							    t2, t3),
					     tquotient);
		      }
		    else
		      {
			rtx t1, t2, t3, t4;

			if (post_shift >= BITS_PER_WORD
			    || size - 1 >= BITS_PER_WORD)
			  goto fail1;

			ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
			mlr = gen_int_mode (ml, compute_mode);
			extra_cost = (shift_cost[speed][compute_mode][post_shift]
				      + shift_cost[speed][compute_mode][size - 1]
				      + 2 * add_cost[speed][compute_mode]);
			t1 = expand_mult_highpart (compute_mode, op0, mlr,
						   NULL_RTX, 0,
						   max_cost - extra_cost);
			if (t1 == 0)
			  goto fail1;
			t2 = force_operand (gen_rtx_PLUS (compute_mode,
							  t1, op0),
					    NULL_RTX);
			t3 = expand_shift
			  (RSHIFT_EXPR, compute_mode, t2,
			   build_int_cst (NULL_TREE, post_shift),
			   NULL_RTX, 0);
			t4 = expand_shift
			  (RSHIFT_EXPR, compute_mode, op0,
			   build_int_cst (NULL_TREE, size - 1),
			   NULL_RTX, 0);
			if (d < 0)
			  quotient
			    = force_operand (gen_rtx_MINUS (compute_mode,
							    t4, t3),
					     tquotient);
			else
			  quotient
			    = force_operand (gen_rtx_MINUS (compute_mode,
							    t3, t4),
					     tquotient);
		      }
		  }
		else		/* Too wide mode to use tricky code */
		  break;

		insn = get_last_insn ();
		if (insn != last
		    && (set = single_set (insn)) != 0
		    && SET_DEST (set) == quotient)
		  set_unique_reg_note (insn,
				       REG_EQUAL,
				       gen_rtx_DIV (compute_mode, op0, op1));
	      }
	    break;
	  }
      fail1:
	delete_insns_since (last);
	break;
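/* Illustrative sketch (not part of this file): the multiply-highpart
   division emitted above, written out for one divisor.  0xCCCCCCCD is
   the standard 32-bit "magic number" for dividing by 5 (roughly
   2**34 / 5, with a post-shift of 2); choose_multiplier picks such a
   multiplier/shift pair for an arbitrary constant divisor.

     #include <stdint.h>

     static uint32_t
     udiv5 (uint32_t n)
     {
       return (uint32_t) (((uint64_t) n * 0xCCCCCCCDu) >> 34);
     }  */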
      case FLOOR_DIV_EXPR:
      case FLOOR_MOD_EXPR:
	/* We will come here only for signed operations.  */
	if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
	  {
	    unsigned HOST_WIDE_INT mh;
	    int pre_shift, lgup, post_shift;
	    HOST_WIDE_INT d = INTVAL (op1);
	    rtx ml;

	    if (d > 0)
	      {
		/* We could just as easily deal with negative constants here,
		   but it does not seem worth the trouble for GCC 2.6.  */
		if (EXACT_POWER_OF_2_OR_ZERO_P (d))
		  {
		    pre_shift = floor_log2 (d);
		    if (rem_flag)
		      {
			remainder = expand_binop (compute_mode, and_optab, op0,
						  GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
						  remainder, 0, OPTAB_LIB_WIDEN);
			if (remainder)
			  return gen_lowpart (mode, remainder);
		      }
		    quotient = expand_shift
		      (RSHIFT_EXPR, compute_mode, op0,
		       build_int_cst (NULL_TREE, pre_shift),
		       tquotient, 0);
		  }
		else
		  {
		    rtx t1, t2, t3, t4;

		    mh = choose_multiplier (d, size, size - 1,
					    &ml, &post_shift, &lgup);
		    gcc_assert (!mh);

		    if (post_shift < BITS_PER_WORD
			&& size - 1 < BITS_PER_WORD)
		      {
			t1 = expand_shift
			  (RSHIFT_EXPR, compute_mode, op0,
			   build_int_cst (NULL_TREE, size - 1),
			   NULL_RTX, 0);
			t2 = expand_binop (compute_mode, xor_optab, op0, t1,
					   NULL_RTX, 0, OPTAB_WIDEN);
			extra_cost = (shift_cost[speed][compute_mode][post_shift]
				      + shift_cost[speed][compute_mode][size - 1]
				      + 2 * add_cost[speed][compute_mode]);
			t3 = expand_mult_highpart (compute_mode, t2, ml,
						   NULL_RTX, 1,
						   max_cost - extra_cost);
			if (t3 != 0)
			  {
			    t4 = expand_shift
			      (RSHIFT_EXPR, compute_mode, t3,
			       build_int_cst (NULL_TREE, post_shift),
			       NULL_RTX, 1);
			    quotient = expand_binop (compute_mode, xor_optab,
						     t4, t1, tquotient, 0,
						     OPTAB_WIDEN);
			  }
		      }
		  }
	      }
	    else
	      {
		rtx nsign, t1, t2, t3, t4;
		t1 = force_operand (gen_rtx_PLUS (compute_mode,
						  op0, constm1_rtx), NULL_RTX);
		t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
				   0, OPTAB_WIDEN);
		nsign = expand_shift
		  (RSHIFT_EXPR, compute_mode, t2,
		   build_int_cst (NULL_TREE, size - 1),
		   NULL_RTX, 0);
		t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
				    NULL_RTX);
		t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
				    NULL_RTX, 0);
		if (t4)
		  {
		    rtx t5;
		    t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
				      NULL_RTX, 0);
		    quotient = force_operand (gen_rtx_PLUS (compute_mode,
							    t4, t5),
					      tquotient);
		  }
	      }
	  }

	if (quotient != 0)
	  break;
	delete_insns_since (last);

	/* Try using an instruction that produces both the quotient and
	   remainder, using truncation.  We can easily compensate the quotient
	   or remainder to get floor rounding, once we have the remainder.
	   Notice that we compute also the final remainder value here,
	   and return the result right away.  */
	if (target == 0 || GET_MODE (target) != compute_mode)
	  target = gen_reg_rtx (compute_mode);

	if (rem_flag)
	  {
	    remainder
	      = REG_P (target) ? target : gen_reg_rtx (compute_mode);
	    quotient = gen_reg_rtx (compute_mode);
	  }
	else
	  {
	    quotient
	      = REG_P (target) ? target : gen_reg_rtx (compute_mode);
	    remainder = gen_reg_rtx (compute_mode);
	  }

	if (expand_twoval_binop (sdivmod_optab, op0, op1,
				 quotient, remainder, 0))
	  {
	    /* This could be computed with a branch-less sequence.
	       Save that for later.  */
	    rtx tem;
	    rtx label = gen_label_rtx ();
	    do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
	    tem = expand_binop (compute_mode, xor_optab, op0, op1,
				NULL_RTX, 0, OPTAB_WIDEN);
	    do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
	    expand_dec (quotient, const1_rtx);
	    expand_inc (remainder, op1);
	    emit_label (label);
	    return gen_lowpart (mode, rem_flag ? remainder : quotient);
	  }

	/* No luck with division elimination or divmod.  Have to do it
	   by conditionally adjusting op0 *and* the result.  */
	{
	  rtx label1, label2, label3, label4, label5;
	  rtx adjusted_op0;
	  rtx tem;

	  quotient = gen_reg_rtx (compute_mode);
	  adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
	  label1 = gen_label_rtx ();
	  label2 = gen_label_rtx ();
	  label3 = gen_label_rtx ();
	  label4 = gen_label_rtx ();
	  label5 = gen_label_rtx ();
	  do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
	  do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
	  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
			      quotient, 0, OPTAB_LIB_WIDEN);
	  if (tem != quotient)
	    emit_move_insn (quotient, tem);
	  emit_jump_insn (gen_jump (label5));
	  emit_barrier ();
	  emit_label (label1);
	  expand_inc (adjusted_op0, const1_rtx);
	  emit_jump_insn (gen_jump (label4));
	  emit_barrier ();
	  emit_label (label2);
	  do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
	  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
			      quotient, 0, OPTAB_LIB_WIDEN);
	  if (tem != quotient)
	    emit_move_insn (quotient, tem);
	  emit_jump_insn (gen_jump (label5));
	  emit_barrier ();
	  emit_label (label3);
	  expand_dec (adjusted_op0, const1_rtx);
	  emit_label (label4);
	  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
			      quotient, 0, OPTAB_LIB_WIDEN);
	  if (tem != quotient)
	    emit_move_insn (quotient, tem);
	  expand_dec (quotient, const1_rtx);
	  emit_label (label5);
	}
	break;
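/* Illustrative sketch (not part of this file): the floor-rounding
   compensation used above, in plain C.  Truncating division rounds
   toward zero; floor division must round toward minus infinity, so
   adjust when there is a nonzero remainder and the signs differ.

     static void
     floor_divmod (int n, int d, int *q, int *r)
     {
       *q = n / d;                       // truncating quotient
       *r = n % d;
       if (*r != 0 && ((n < 0) != (d < 0)))
         {
           *q -= 1;                      // expand_dec (quotient, const1_rtx)
           *r += d;                      // expand_inc (remainder, op1)
         }
     }  */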
      case CEIL_DIV_EXPR:
      case CEIL_MOD_EXPR:
	if (unsignedp)
	  {
	    if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
	      {
		rtx t1, t2, t3;
		unsigned HOST_WIDE_INT d = INTVAL (op1);
		t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
				   build_int_cst (NULL_TREE, floor_log2 (d)),
				   tquotient, 1);
		t2 = expand_binop (compute_mode, and_optab, op0,
				   GEN_INT (d - 1),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
		t3 = gen_reg_rtx (compute_mode);
		t3 = emit_store_flag (t3, NE, t2, const0_rtx,
				      compute_mode, 1, 1);
		if (t3 == 0)
		  {
		    rtx lab;
		    lab = gen_label_rtx ();
		    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
		    expand_inc (t1, const1_rtx);
		    emit_label (lab);
		    quotient = t1;
		  }
		else
		  quotient = force_operand (gen_rtx_PLUS (compute_mode,
							   t1, t3),
					    tquotient);
		break;
	      }

	    /* Try using an instruction that produces both the quotient and
	       remainder, using truncation.  We can easily compensate the
	       quotient or remainder to get ceiling rounding, once we have the
	       remainder.  Notice that we compute also the final remainder
	       value here, and return the result right away.  */
	    if (target == 0 || GET_MODE (target) != compute_mode)
	      target = gen_reg_rtx (compute_mode);

	    if (rem_flag)
	      {
		remainder = (REG_P (target)
			     ? target : gen_reg_rtx (compute_mode));
		quotient = gen_reg_rtx (compute_mode);
	      }
	    else
	      {
		quotient = (REG_P (target)
			    ? target : gen_reg_rtx (compute_mode));
		remainder = gen_reg_rtx (compute_mode);
	      }

	    if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
				     remainder, 1))
	      {
		/* This could be computed with a branch-less sequence.
		   Save that for later.  */
		rtx label = gen_label_rtx ();
		do_cmp_and_jump (remainder, const0_rtx, EQ,
				 compute_mode, label);
		expand_inc (quotient, const1_rtx);
		expand_dec (remainder, op1);
		emit_label (label);
		return gen_lowpart (mode, rem_flag ? remainder : quotient);
	      }

	    /* No luck with division elimination or divmod.  Have to do it
	       by conditionally adjusting op0 *and* the result.  */
	    {
	      rtx label1, label2;
	      rtx adjusted_op0, tem;

	      quotient = gen_reg_rtx (compute_mode);
	      adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
	      label1 = gen_label_rtx ();
	      label2 = gen_label_rtx ();
	      do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
			       compute_mode, label1);
	      emit_move_insn (quotient, const0_rtx);
	      emit_jump_insn (gen_jump (label2));
	      emit_barrier ();
	      emit_label (label1);
	      expand_dec (adjusted_op0, const1_rtx);
	      tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
				  quotient, 1, OPTAB_LIB_WIDEN);
	      if (tem != quotient)
		emit_move_insn (quotient, tem);
	      expand_inc (quotient, const1_rtx);
	      emit_label (label2);
	    }
	  }
	else /* signed */
	  {
	    if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
		&& INTVAL (op1) >= 0)
	      {
		/* This is extremely similar to the code for the unsigned case
		   above.  For 2.7 we should merge these variants, but for
		   2.6.1 I don't want to touch the code for unsigned since that
		   gets used in C.  The signed case will only be used by other
		   languages (Ada).  */

		rtx t1, t2, t3;
		unsigned HOST_WIDE_INT d = INTVAL (op1);
		t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
				   build_int_cst (NULL_TREE, floor_log2 (d)),
				   tquotient, 0);
		t2 = expand_binop (compute_mode, and_optab, op0,
				   GEN_INT (d - 1),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
		t3 = gen_reg_rtx (compute_mode);
		t3 = emit_store_flag (t3, NE, t2, const0_rtx,
				      compute_mode, 1, 1);
		if (t3 == 0)
		  {
		    rtx lab;
		    lab = gen_label_rtx ();
		    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
		    expand_inc (t1, const1_rtx);
		    emit_label (lab);
		    quotient = t1;
		  }
		else
		  quotient = force_operand (gen_rtx_PLUS (compute_mode,
							   t1, t3),
					    tquotient);
		break;
	      }

	    /* Try using an instruction that produces both the quotient and
	       remainder, using truncation.  We can easily compensate the
	       quotient or remainder to get ceiling rounding, once we have the
	       remainder.  Notice that we compute also the final remainder
	       value here, and return the result right away.  */
	    if (target == 0 || GET_MODE (target) != compute_mode)
	      target = gen_reg_rtx (compute_mode);

	    if (rem_flag)
	      {
		remainder = (REG_P (target)
			     ? target : gen_reg_rtx (compute_mode));
		quotient = gen_reg_rtx (compute_mode);
	      }
	    else
	      {
		quotient = (REG_P (target)
			    ? target : gen_reg_rtx (compute_mode));
		remainder = gen_reg_rtx (compute_mode);
	      }

	    if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
				     remainder, 0))
	      {
		/* This could be computed with a branch-less sequence.
		   Save that for later.  */
		rtx tem;
		rtx label = gen_label_rtx ();
		do_cmp_and_jump (remainder, const0_rtx, EQ,
				 compute_mode, label);
		tem = expand_binop (compute_mode, xor_optab, op0, op1,
				    NULL_RTX, 0, OPTAB_WIDEN);
		do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
		expand_inc (quotient, const1_rtx);
		expand_dec (remainder, op1);
		emit_label (label);
		return gen_lowpart (mode, rem_flag ? remainder : quotient);
	      }

	    /* No luck with division elimination or divmod.  Have to do it
	       by conditionally adjusting op0 *and* the result.  */
	    {
	      rtx label1, label2, label3, label4, label5;
	      rtx adjusted_op0;
	      rtx tem;

	      quotient = gen_reg_rtx (compute_mode);
	      adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
	      label1 = gen_label_rtx ();
	      label2 = gen_label_rtx ();
	      label3 = gen_label_rtx ();
	      label4 = gen_label_rtx ();
	      label5 = gen_label_rtx ();
	      do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
	      do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
			       compute_mode, label1);
	      tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
				  quotient, 0, OPTAB_LIB_WIDEN);
	      if (tem != quotient)
		emit_move_insn (quotient, tem);
	      emit_jump_insn (gen_jump (label5));
	      emit_barrier ();
	      emit_label (label1);
	      expand_dec (adjusted_op0, const1_rtx);
	      emit_jump_insn (gen_jump (label4));
	      emit_barrier ();
	      emit_label (label2);
	      do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
			       compute_mode, label3);
	      tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
				  quotient, 0, OPTAB_LIB_WIDEN);
	      if (tem != quotient)
		emit_move_insn (quotient, tem);
	      emit_jump_insn (gen_jump (label5));
	      emit_barrier ();
	      emit_label (label3);
	      expand_inc (adjusted_op0, const1_rtx);
	      emit_label (label4);
	      tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
				  quotient, 0, OPTAB_LIB_WIDEN);
	      if (tem != quotient)
		emit_move_insn (quotient, tem);
	      expand_inc (quotient, const1_rtx);
	      emit_label (label5);
	    }
	  }
	break;
      case EXACT_DIV_EXPR:
	if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
	  {
	    HOST_WIDE_INT d = INTVAL (op1);
	    unsigned HOST_WIDE_INT ml;
	    int pre_shift;
	    rtx t1;

	    pre_shift = floor_log2 (d & -d);
	    ml = invert_mod2n (d >> pre_shift, size);
	    t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
			       build_int_cst (NULL_TREE, pre_shift),
			       NULL_RTX, unsignedp);
	    quotient = expand_mult (compute_mode, t1,
				    gen_int_mode (ml, compute_mode),
				    NULL_RTX, 1);

	    insn = get_last_insn ();
	    set_unique_reg_note (insn,
				 REG_EQUAL,
				 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
						 compute_mode,
						 op0, op1));
	  }
	break;
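/* Illustrative sketch (not part of this file): exact division via the
   multiplicative inverse that invert_mod2n computes.  When n is known
   to be a multiple of 3, n / 3 is a single multiply, because
   3 * 0xAAAAAAAB == 1 (mod 2**32).

     #include <stdint.h>

     static uint32_t
     exact_div3 (uint32_t n)   // precondition: n % 3 == 0
     {
       return n * 0xAAAAAAABu;
     }  */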
      case ROUND_DIV_EXPR:
      case ROUND_MOD_EXPR:
	if (unsignedp)
	  {
	    rtx tem;
	    rtx label;
	    label = gen_label_rtx ();
	    quotient = gen_reg_rtx (compute_mode);
	    remainder = gen_reg_rtx (compute_mode);
	    if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
				     remainder, 1) == 0)
	      {
		rtx tem;
		quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
					 quotient, 1, OPTAB_LIB_WIDEN);
		tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
		remainder = expand_binop (compute_mode, sub_optab, op0, tem,
					  remainder, 1, OPTAB_LIB_WIDEN);
	      }
	    tem = plus_constant (op1, -1);
	    tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
				build_int_cst (NULL_TREE, 1),
				NULL_RTX, 1);
	    do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
	    expand_inc (quotient, const1_rtx);
	    expand_dec (remainder, op1);
	    emit_label (label);
	  }
	else
	  {
	    rtx abs_rem, abs_op1, tem, mask;
	    rtx label;
	    label = gen_label_rtx ();
	    quotient = gen_reg_rtx (compute_mode);
	    remainder = gen_reg_rtx (compute_mode);
	    if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
				     remainder, 0) == 0)
	      {
		rtx tem;
		quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
					 quotient, 0, OPTAB_LIB_WIDEN);
		tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
		remainder = expand_binop (compute_mode, sub_optab, op0, tem,
					  remainder, 0, OPTAB_LIB_WIDEN);
	      }
	    abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
	    abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
	    tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
				build_int_cst (NULL_TREE, 1),
				NULL_RTX, 1);
	    do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
	    tem = expand_binop (compute_mode, xor_optab, op0, op1,
				NULL_RTX, 0, OPTAB_WIDEN);
	    mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
				 build_int_cst (NULL_TREE, size - 1),
				 NULL_RTX, 0);
	    tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
				NULL_RTX, 0, OPTAB_WIDEN);
	    tem = expand_binop (compute_mode, sub_optab, tem, mask,
				NULL_RTX, 0, OPTAB_WIDEN);
	    expand_inc (quotient, tem);
	    tem = expand_binop (compute_mode, xor_optab, mask, op1,
				NULL_RTX, 0, OPTAB_WIDEN);
	    tem = expand_binop (compute_mode, sub_optab, tem, mask,
				NULL_RTX, 0, OPTAB_WIDEN);
	    expand_dec (remainder, tem);
	    emit_label (label);
	  }
	return gen_lowpart (mode, rem_flag ? remainder : quotient);
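/* Illustrative sketch (not part of this file): the round-to-nearest
   adjustment above, for the unsigned case, in plain C.

     static unsigned
     round_div (unsigned n, unsigned d)
     {
       unsigned q = n / d, r = n % d;
       // Round up when the remainder exceeds (d - 1) / 2, exactly as
       // the do_cmp_and_jump against (op1 - 1) >> 1 does.
       if (r > (d - 1) / 2)
         q++;
       return q;
     }  */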
      default:
	gcc_unreachable ();
      }

  if (quotient == 0)
    {
      if (target && GET_MODE (target) != compute_mode)
	target = 0;

      if (rem_flag)
	{
	  /* Try to produce the remainder without producing the quotient.
	     If we seem to have a divmod pattern that does not require widening,
	     don't try widening here.  We should really have a WIDEN argument
	     to expand_twoval_binop, since what we'd really like to do here is
	     1) try a mod insn in compute_mode
	     2) try a divmod insn in compute_mode
	     3) try a div insn in compute_mode and multiply-subtract to get
		remainder
	     4) try the same things with widening allowed.  */
	  remainder
	    = sign_expand_binop (compute_mode, umod_optab, smod_optab,
				 op0, op1, target,
				 unsignedp,
				 ((optab_handler (optab2, compute_mode)->insn_code
				   != CODE_FOR_nothing)
				  ? OPTAB_DIRECT : OPTAB_WIDEN));
	  if (remainder == 0)
	    {
	      /* No luck there.  Can we do remainder and divide at once
		 without a library call?  */
	      remainder = gen_reg_rtx (compute_mode);
	      if (! expand_twoval_binop ((unsignedp
					  ? udivmod_optab
					  : sdivmod_optab),
					 op0, op1,
					 NULL_RTX, remainder, unsignedp))
		remainder = 0;
	    }

	  if (remainder)
	    return gen_lowpart (mode, remainder);
	}

      /* Produce the quotient.  Try a quotient insn, but not a library call.
	 If we have a divmod in this mode, use it in preference to widening
	 the div (for this test we assume it will not fail).  Note that optab2
	 is set to the one of the two optabs that the call below will use.  */
      quotient
	= sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
			     op0, op1, rem_flag ? NULL_RTX : target,
			     unsignedp,
			     ((optab_handler (optab2, compute_mode)->insn_code
			       != CODE_FOR_nothing)
			      ? OPTAB_DIRECT : OPTAB_WIDEN));

      if (quotient == 0)
	{
	  /* No luck there.  Try a quotient-and-remainder insn,
	     keeping the quotient alone.  */
	  quotient = gen_reg_rtx (compute_mode);
	  if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
				     op0, op1,
				     quotient, NULL_RTX, unsignedp))
	    {
	      quotient = 0;
	      /* Still no luck.  If we are not computing the remainder,
		 use a library call for the quotient.  */
	      if (! rem_flag)
		quotient = sign_expand_binop (compute_mode,
					      udiv_optab, sdiv_optab,
					      op0, op1, target,
					      unsignedp, OPTAB_LIB_WIDEN);
	    }
	}
    }

  if (rem_flag)
    {
      if (target && GET_MODE (target) != compute_mode)
	target = 0;

      if (quotient == 0)
	{
	  /* No divide instruction either.  Use library for remainder.  */
	  remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
					 op0, op1, target,
					 unsignedp, OPTAB_LIB_WIDEN);
	  /* No remainder function.  Try a quotient-and-remainder
	     function, keeping the remainder.  */
	  if (remainder == 0)
	    {
	      remainder = gen_reg_rtx (compute_mode);
	      if (!expand_twoval_binop_libfunc
		  (unsignedp ? udivmod_optab : sdivmod_optab,
		   op0, op1,
		   NULL_RTX, remainder,
		   unsignedp ? UMOD : MOD))
		remainder = NULL_RTX;
	    }
	}
      else
	{
	  /* We divided.  Now finish doing X - Y * (X / Y).  */
	  remainder = expand_mult (compute_mode, quotient, op1,
				   NULL_RTX, unsignedp);
	  remainder = expand_binop (compute_mode, sub_optab, op0,
				    remainder, target, unsignedp,
				    OPTAB_LIB_WIDEN);
	}
    }

  return gen_lowpart (mode, rem_flag ? remainder : quotient);
}
/* Return a tree node with data type TYPE, describing the value of X.
   Usually this is a VAR_DECL, if there is no obvious better choice.
   X may be an expression, however we only support those expressions
   generated by loop.c.  */

tree
make_tree (tree type, rtx x)
{
  tree t;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      {
	HOST_WIDE_INT hi = 0;

	if (INTVAL (x) < 0
	    && !(TYPE_UNSIGNED (type)
		 && (GET_MODE_BITSIZE (TYPE_MODE (type))
		     < HOST_BITS_PER_WIDE_INT)))
	  hi = -1;

	t = build_int_cst_wide (type, INTVAL (x), hi);

	return t;
      }

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
	t = build_int_cst_wide (type,
				CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
      else
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, x);
	  t = build_real (type, d);
	}

      return t;

    case CONST_VECTOR:
      {
	int units = CONST_VECTOR_NUNITS (x);
	tree itype = TREE_TYPE (type);
	tree t = NULL_TREE;
	int i;

	/* Build a tree with vector elements.  */
	for (i = units - 1; i >= 0; --i)
	  {
	    rtx elt = CONST_VECTOR_ELT (x, i);
	    t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
	  }

	return build_vector (type, t);
      }

    case PLUS:
      return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1)));

    case MINUS:
      return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1)));

    case NEG:
      return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));

    case MULT:
      return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1)));

    case ASHIFT:
      return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1)));

    case LSHIFTRT:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
					 make_tree (t, XEXP (x, 0)),
					 make_tree (type, XEXP (x, 1))));

    case ASHIFTRT:
      t = signed_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
					 make_tree (t, XEXP (x, 0)),
					 make_tree (type, XEXP (x, 1))));

    case DIV:
      if (TREE_CODE (type) != REAL_TYPE)
	t = signed_type_for (type);
      else
	t = type;

      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
					 make_tree (t, XEXP (x, 0)),
					 make_tree (t, XEXP (x, 1))));
    case UDIV:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
					 make_tree (t, XEXP (x, 0)),
					 make_tree (t, XEXP (x, 1))));

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
					  GET_CODE (x) == ZERO_EXTEND);
      return fold_convert (type, make_tree (t, XEXP (x, 0)));

    case CONST:
      return make_tree (type, XEXP (x, 0));

    case SYMBOL_REF:
      t = SYMBOL_REF_DECL (x);
      if (t)
	return fold_convert (type, build_fold_addr_expr (t));
      /* else fall through.  */

    default:
      t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);

      /* If TYPE is a POINTER_TYPE, we might need to convert X from
	 address mode to pointer mode.  */
      if (POINTER_TYPE_P (type))
	x = convert_memory_address_addr_space
	      (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));

      /* Note that we do *not* use SET_DECL_RTL here, because we do not
	 want set_decl_rtl to go adjusting REG_ATTRS for this temporary.  */
      t->decl_with_rtl.rtl = x;

      return t;
    }
}
/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */

rtx
expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
{
  rtx tem = 0;

  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
    tem = simplify_binary_operation (AND, mode, op0, op1);
  if (tem == 0)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0,
			OPTAB_LIB_WIDEN);

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}
/* Helper function for emit_store_flag.  */

static rtx
emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
	     enum machine_mode mode, enum machine_mode compare_mode,
	     int unsignedp, rtx x, rtx y, int normalizep,
	     enum machine_mode target_mode)
{
  rtx op0, last, comparison, subtarget, pattern;
  enum machine_mode result_mode = insn_data[(int) icode].operand[0].mode;

  last = get_last_insn ();
  x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
  y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
  comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
  if (!x || !y
      || !insn_data[icode].operand[2].predicate
	  (x, insn_data[icode].operand[2].mode)
      || !insn_data[icode].operand[3].predicate
	  (y, insn_data[icode].operand[3].mode)
      || !insn_data[icode].operand[1].predicate (comparison, VOIDmode))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  if (target_mode == VOIDmode)
    target_mode = result_mode;
  if (!target)
    target = gen_reg_rtx (target_mode);

  if (optimize
      || !(insn_data[(int) icode].operand[0].predicate (target, result_mode)))
    subtarget = gen_reg_rtx (result_mode);
  else
    subtarget = target;

  pattern = GEN_FCN (icode) (subtarget, comparison, x, y);
  if (!pattern)
    return NULL_RTX;
  emit_insn (pattern);

  /* If we are converting to a wider mode, first convert to
     TARGET_MODE, then normalize.  This produces better combining
     opportunities on machines that have a SIGN_EXTRACT when we are
     testing a single bit.  This mostly benefits the 68k.

     If STORE_FLAG_VALUE does not have the sign bit set when
     interpreted in MODE, we can do this conversion as unsigned, which
     is usually more efficient.  */
  if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
    {
      convert_move (target, subtarget,
		    (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT)
		    && 0 == (STORE_FLAG_VALUE
			     & ((HOST_WIDE_INT) 1
				<< (GET_MODE_BITSIZE (result_mode) - 1))));
      op0 = target;
      result_mode = target_mode;
    }
  else
    op0 = subtarget;

  /* If we want to keep subexpressions around, don't reuse our last
     target.  */
  if (optimize)
    subtarget = 0;

  /* Now normalize to the proper value in MODE.  Sometimes we don't
     have to do anything.  */
  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
    ;
  /* STORE_FLAG_VALUE might be the most negative number, so write
     the comparison this way to avoid a compile-time warning.  */
  else if (- normalizep == STORE_FLAG_VALUE)
    op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);

  /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
     it hard to use a value of just the sign bit due to ANSI integer
     constant typing rules.  */
  else if (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
	   && (STORE_FLAG_VALUE
	       & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1))))
    op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
			size_int (GET_MODE_BITSIZE (result_mode) - 1),
			subtarget, normalizep == 1);
  else
    {
      gcc_assert (STORE_FLAG_VALUE & 1);

      op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
      if (normalizep == -1)
	op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
    }

  /* If we were converting to a smaller mode, do the conversion now.  */
  if (target_mode != result_mode)
    {
      convert_move (target, op0, 0);
      return target;
    }
  else
    return op0;
}
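/* Illustrative sketch (not part of this file): what the normalization
   above computes, for a 32-bit "raw" flag value R whose information is
   in the sign bit.  A 0/1 result is one logical shift away and a 0/-1
   result one arithmetic shift away; when STORE_FLAG_VALUE instead has
   the low bit set, an AND (plus a negate for -1) suffices.

     #include <stdint.h>

     static int32_t norm_one (int32_t r)        // 0 or 1
     {
       return (int32_t) ((uint32_t) r >> 31);   // logical shift
     }

     static int32_t norm_minus_one (int32_t r)  // 0 or -1
     {
       return r >> 31;   // arithmetic shift, assuming sign extension
     }  */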
/* A subroutine of emit_store_flag only including "tricks" that do not
   need a recursive call.  These are kept separate to avoid infinite
   loops.  */

static rtx
emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
		   enum machine_mode mode, int unsignedp, int normalizep,
		   enum machine_mode target_mode)
{
  rtx subtarget;
  enum insn_code icode;
  enum machine_mode compare_mode;
  enum mode_class mclass;
  enum rtx_code scode;
  rtx tem;

  if (unsignedp)
    code = unsigned_condition (code);
  scode = swap_condition (code);

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }

  /* If we are comparing a double-word integer with zero or -1, we can
     convert the comparison into one involving a single word.  */
  if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
      && GET_MODE_CLASS (mode) == MODE_INT
      && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
    {
      if ((code == EQ || code == NE)
	  && (op1 == const0_rtx || op1 == constm1_rtx))
	{
	  rtx op00, op01;

	  /* Do a logical OR or AND of the two words and compare the
	     result.  */
	  op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
	  op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
	  tem = expand_binop (word_mode,
			      op1 == const0_rtx ? ior_optab : and_optab,
			      op00, op01, NULL_RTX, unsignedp,
			      OPTAB_DIRECT);

	  if (tem)
	    tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
				   unsignedp, normalizep);
	}
      else if ((code == LT || code == GE) && op1 == const0_rtx)
	{
	  rtx op0h;

	  /* If testing the sign bit, can just test on high word.  */
	  op0h = simplify_gen_subreg (word_mode, op0, mode,
				      subreg_highpart_offset (word_mode,
							      mode));
	  tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
				 unsignedp, normalizep);
	}
      else
	tem = NULL_RTX;

      if (tem)
	{
	  if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
	    return tem;
	  if (!target)
	    target = gen_reg_rtx (target_mode);

	  convert_move (target, tem,
			0 == ((normalizep ? normalizep : STORE_FLAG_VALUE)
			      & ((HOST_WIDE_INT) 1
				 << (GET_MODE_BITSIZE (word_mode) - 1))));
	  return target;
	}
    }

  /* If this is A < 0 or A >= 0, we can do this by taking the ones'
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
	  || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
		  == ((unsigned HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (mode) - 1))))))
    {
      subtarget = target;

      if (!target)
	target_mode = mode;

      /* If the result is to be wider than OP0, it is best to convert it
	 first.  If it is to be narrower, it is *incorrect* to convert it
	 first.  */
      else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
	{
	  op0 = convert_modes (target_mode, mode, op0, 0);
	  mode = target_mode;
	}

      if (target_mode != mode)
	subtarget = 0;

      if (code == GE)
	op0 = expand_unop (mode, one_cmpl_optab, op0,
			   ((STORE_FLAG_VALUE == 1 || normalizep)
			    ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
	/* If we are supposed to produce a 0/1 value, we want to do
	   a logical shift from the sign bit to the low-order bit; for
	   a -1/0 value, we do an arithmetic shift.  */
	op0 = expand_shift (RSHIFT_EXPR, mode, op0,
			    size_int (GET_MODE_BITSIZE (mode) - 1),
			    subtarget, normalizep != -1);

      if (mode != target_mode)
	op0 = convert_modes (target_mode, mode, op0, 0);

      return op0;
    }

  mclass = GET_MODE_CLASS (mode);
  for (compare_mode = mode; compare_mode != VOIDmode;
       compare_mode = GET_MODE_WIDER_MODE (compare_mode))
    {
      enum machine_mode optab_mode
	= mclass == MODE_CC ? CCmode : compare_mode;
      icode = optab_handler (cstore_optab, optab_mode)->insn_code;
      if (icode != CODE_FOR_nothing)
	{
	  do_pending_stack_adjust ();
	  tem = emit_cstore (target, icode, code, mode, compare_mode,
			     unsignedp, op0, op1, normalizep, target_mode);
	  if (tem)
	    return tem;

	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	    {
	      tem = emit_cstore (target, icode, scode, mode, compare_mode,
				 unsignedp, op1, op0, normalizep, target_mode);
	      if (tem)
		return tem;
	    }
	  break;
	}
    }

  return 0;
}
/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  NORMALIZEP is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */

rtx
emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
		 enum machine_mode mode, int unsignedp, int normalizep)
{
  enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
  enum rtx_code rcode;
  rtx subtarget;
  rtx tem, last, trueval;

  tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp,
			   normalizep, target_mode);
  if (tem)
    return tem;

  /* If we reached here, we can't do this with a scc insn, however there
     are some comparisons that can be done in other ways.  Don't do any
     of these cases if branches are very cheap.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
    return 0;

  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	normalizep = STORE_FLAG_VALUE;

      else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	       && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
		   == ((unsigned HOST_WIDE_INT) 1
		       << (GET_MODE_BITSIZE (mode) - 1))))
	;
      else
	return 0;
    }

  last = get_last_insn ();

  /* If optimizing, use different pseudo registers for each insn, instead
     of reusing the same pseudo.  This leads to better CSE, but slows
     down the compiler, since there are more pseudos.  */
  subtarget = (!optimize
	       && (target_mode == mode)) ? target : NULL_RTX;
  trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);

  /* For floating-point comparisons, try the reverse comparison or try
     changing the "orderedness" of the comparison.  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      enum rtx_code first_code;
      bool and_them;

      rcode = reverse_condition_maybe_unordered (code);
      if (can_compare_p (rcode, mode, ccp_store_flag)
	  && (code == ORDERED || code == UNORDERED
	      || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
	      || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
	{
	  int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
			  || (STORE_FLAG_VALUE == -1 && normalizep == 1));

	  /* For the reverse comparison, use either an addition or a XOR.  */
	  if (want_add
	      && rtx_cost (GEN_INT (normalizep), PLUS,
			   optimize_insn_for_speed_p ()) == 0)
	    {
	      tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       STORE_FLAG_VALUE, target_mode);
	      if (tem)
		return expand_binop (target_mode, add_optab, tem,
				     GEN_INT (normalizep),
				     target, 0, OPTAB_WIDEN);
	    }
	  else if (!want_add
		   && rtx_cost (trueval, XOR,
				optimize_insn_for_speed_p ()) == 0)
	    {
	      tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       normalizep, target_mode);
	      if (tem)
		return expand_binop (target_mode, xor_optab, tem, trueval,
				     target, INTVAL (trueval) >= 0,
				     OPTAB_WIDEN);
	    }
	}

      delete_insns_since (last);

      /* Cannot split ORDERED and UNORDERED, only try the above trick.  */
      if (code == ORDERED || code == UNORDERED)
	return 0;

      and_them = split_comparison (code, mode, &first_code, &code);

      /* If there are no NaNs, the first comparison should always fall through.
	 Effectively change the comparison to the other one.  */
      if (!HONOR_NANS (mode))
	{
	  gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
	  return emit_store_flag_1 (target, code, op0, op1, mode, 0,
				    normalizep, target_mode);
	}

#ifdef HAVE_conditional_move
      /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
	 conditional move.  */
      tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
			       normalizep, target_mode);
      if (tem == 0)
	return 0;

      if (and_them)
	tem = emit_conditional_move (target, code, op0, op1, mode,
				     tem, const0_rtx, GET_MODE (tem), 0);
      else
	tem = emit_conditional_move (target, code, op0, op1, mode,
				     trueval, tem, GET_MODE (tem), 0);

      if (tem == 0)
	delete_insns_since (last);
      return tem;
#else
      return 0;
#endif
    }
  /* The remaining tricks only apply to integer comparisons.  */

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return 0;

  /* If this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if ((code == EQ || code == NE) && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
			  OPTAB_WIDEN);

      if (tem == 0)
	tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
			    OPTAB_WIDEN);
      if (tem != 0)
	tem = emit_store_flag (target, code, tem, const0_rtx,
			       mode, unsignedp, normalizep);
      if (tem != 0)
	return tem;

      delete_insns_since (last);
    }

  /* For integer comparisons, try the reverse comparison.  However, for
     small X, if we'd have to extend anyway, implementing "X != 0"
     as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0".  */
  rcode = reverse_condition (code);
  if (can_compare_p (rcode, mode, ccp_store_flag)
      && ! (optab_handler (cstore_optab, mode)->insn_code == CODE_FOR_nothing
	    && code == NE
	    && GET_MODE_SIZE (mode) < UNITS_PER_WORD
	    && op1 == const0_rtx))
    {
      int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
		      || (STORE_FLAG_VALUE == -1 && normalizep == 1));

      /* Again, for the reverse comparison, use either an addition or a XOR.  */
      if (want_add
	  && rtx_cost (GEN_INT (normalizep), PLUS,
		       optimize_insn_for_speed_p ()) == 0)
	{
	  tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				   STORE_FLAG_VALUE, target_mode);
	  if (tem != 0)
	    tem = expand_binop (target_mode, add_optab, tem,
				GEN_INT (normalizep), target, 0, OPTAB_WIDEN);
	}
      else if (!want_add
	       && rtx_cost (trueval, XOR,
			    optimize_insn_for_speed_p ()) == 0)
	{
	  tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				   normalizep, target_mode);
	  if (tem != 0)
	    tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
				INTVAL (trueval) >= 0, OPTAB_WIDEN);
	}

      if (tem != 0)
	return tem;
      delete_insns_since (last);
    }

  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (op1 != const0_rtx
      || (code != EQ && code != NE
	  && (BRANCH_COST (optimize_insn_for_speed_p (),
			   false) <= 1 || (code != LE && code != GT))))
    return 0;

  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
			  OPTAB_WIDEN);
      if (tem)
	tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
			    OPTAB_WIDEN);
    }

  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
			  size_int (GET_MODE_BITSIZE (mode) - 1),
			  subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
			  OPTAB_WIDEN);
    }

  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
	 that converts the operand into a positive number if it is nonzero
	 or zero if it was originally zero.  Then, for EQ, we subtract 1 and
	 for NE we negate.  This puts the result in the sign bit.  Then we
	 normalize with a shift, if needed.

	 Two operations that can do the above actions are ABS and FFS, so try
	 them.  If that doesn't work, and MODE is smaller than a full word,
	 we can use zero-extension to the wider mode (an unsigned conversion)
	 as the operation.  */

      /* Note that ABS doesn't yield a positive number for INT_MIN, but
	 that is compensated by the subsequent overflow when subtracting
	 one / negating.  */

      if (optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)
	tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (optab_handler (ffs_optab, mode)->insn_code != CODE_FOR_nothing)
	tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
	{
	  tem = convert_modes (word_mode, mode, op0, 1);
	  mode = word_mode;
	}

      if (tem != 0)
	{
	  if (code == EQ)
	    tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
				0, OPTAB_WIDEN);
	  else
	    tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
	}

      /* If we couldn't do it that way, for NE we can "or" the two's complement
	 of the value with itself.  For EQ, we take the one's complement of
	 that "or", which is an extra insn, so we only handle EQ if branches
	 are expensive.  */

      if (tem == 0
	  && (code == NE
	      || BRANCH_COST (optimize_insn_for_speed_p (),
			      false) > 1))
	{
	  if (rtx_equal_p (subtarget, op0))
	    subtarget = 0;

	  tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
	  tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
			      OPTAB_WIDEN);

	  if (tem && code == EQ)
	    tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
	}
    }

  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
			size_int (GET_MODE_BITSIZE (mode) - 1),
			subtarget, normalizep == 1);

  if (tem)
    {
      if (!target)
	;
      else if (GET_MODE (tem) != target_mode)
	{
	  convert_move (target, tem, 0);
	  tem = target;
	}
      else if (!subtarget)
	{
	  emit_move_insn (target, tem);
	  tem = target;
	}
    }
  else
    delete_insns_since (last);

  return tem;
}
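/* Illustrative sketch (not part of this file): the sign-bit encodings
   used above for comparisons against zero, in plain C for 32 bits
   (the right shift of a negative value assumes arithmetic shifting).

     #include <stdint.h>

     static int le0 (int32_t a)   // A <= 0 iff (A | (A - 1)) has sign bit set
     {
       uint32_t t = (uint32_t) a | ((uint32_t) a - 1u);
       return (t >> 31) != 0;
     }

     static int gt0 (int32_t a)   // A > 0 iff ((A >> 31) - A) has sign bit set
     {
       uint32_t t = (uint32_t) (a >> 31) - (uint32_t) a;
       return (t >> 31) != 0;
     }  */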
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       enum machine_mode mode, int unsignedp, int normalizep)
{
  rtx tem, label;
  rtx trueval, falseval;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (!target)
    target = gen_reg_rtx (word_mode);

  /* If this failed, we have to do this with set/compare/jump/set code.
     For foo != 0, if foo is in OP0, just replace it with 1 if nonzero.  */
  trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
  if (code == NE
      && GET_MODE_CLASS (mode) == MODE_INT
      && REG_P (target)
      && op0 == target
      && op1 == const0_rtx)
    {
      label = gen_label_rtx ();
      do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
			       mode, NULL_RTX, NULL_RTX, label, -1);
      emit_move_insn (target, trueval);
      emit_label (label);
      return target;
    }

  if (!REG_P (target)
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  /* Jump in the right direction if the target cannot implement CODE
     but can jump on its reverse condition.  */
  falseval = const0_rtx;
  if (! can_compare_p (code, mode, ccp_jump)
      && (! FLOAT_MODE_P (mode)
	  || code == ORDERED || code == UNORDERED
	  || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
	  || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
    {
      enum rtx_code rcode;
      if (FLOAT_MODE_P (mode))
	rcode = reverse_condition_maybe_unordered (code);
      else
	rcode = reverse_condition (code);

      /* Canonicalize to UNORDERED for the libcall.  */
      if (can_compare_p (rcode, mode, ccp_jump)
	  || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
	{
	  falseval = trueval;
	  trueval = const0_rtx;
	  code = rcode;
	}
    }

  emit_move_insn (target, trueval);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
			   NULL_RTX, label, -1);

  emit_move_insn (target, falseval);
  emit_label (label);

  return target;
}
/* Perform possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE.  This is
   now a thin wrapper around do_compare_rtx_and_jump.  */

static void
do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
		 rtx label)
{
  int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
  do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
			   NULL_RTX, NULL_RTX, label, -1);
}