gcc/expmed.c
1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 Free Software Foundation, Inc.
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "toplev.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "tm_p.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "expr.h"
35 #include "optabs.h"
36 #include "real.h"
37 #include "recog.h"
38 #include "langhooks.h"
39 #include "df.h"
40 #include "target.h"
42 static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
43 unsigned HOST_WIDE_INT,
44 unsigned HOST_WIDE_INT, rtx);
45 static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
46 unsigned HOST_WIDE_INT, rtx);
47 static rtx extract_fixed_bit_field (enum machine_mode, rtx,
48 unsigned HOST_WIDE_INT,
49 unsigned HOST_WIDE_INT,
50 unsigned HOST_WIDE_INT, rtx, int);
51 static rtx mask_rtx (enum machine_mode, int, int, int);
52 static rtx lshift_value (enum machine_mode, rtx, int, int);
53 static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
54 unsigned HOST_WIDE_INT, int);
55 static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
56 static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
57 static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
 59 /* Test whether a value is zero or a power of two.  */
60 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
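/* For illustration, a few evaluations of the macro above (an editorial
   sketch, not used by the code): clearing the lowest set bit of a power
   of two, or of zero, leaves nothing behind, which is exactly what the
   AND with x - 1 tests.

     EXACT_POWER_OF_2_OR_ZERO_P (0)   ->  0 & -1  == 0  -> true
     EXACT_POWER_OF_2_OR_ZERO_P (8)   ->  8 & 7   == 0  -> true
     EXACT_POWER_OF_2_OR_ZERO_P (12)  ->  12 & 11 == 8  -> false  */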
62 /* Nonzero means divides or modulus operations are relatively cheap for
63 powers of two, so don't use branches; emit the operation instead.
64 Usually, this will mean that the MD file will emit non-branch
65 sequences. */
67 static bool sdiv_pow2_cheap[2][NUM_MACHINE_MODES];
68 static bool smod_pow2_cheap[2][NUM_MACHINE_MODES];
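/* As a concrete reading of the flags above: init_expmed below fills them in
   by costing skeleton RTL for a division and a modulus by 32, so for each
   mode and speed setting

     sdiv_pow2_cheap[speed][mode] = cost (reg / 32) <= 2 * add_cost[speed][mode]
     smod_pow2_cheap[speed][mode] = cost (reg % 32) <= 4 * add_cost[speed][mode]

   where "cost" is rtx_cost with outer code SET, and the expressions are
   shorthand for the all.sdiv_32 and all.smod_32 rtxes built in init_expmed.  */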
70 #ifndef SLOW_UNALIGNED_ACCESS
71 #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
72 #endif
74 /* For compilers that support multiple targets with different word sizes,
75 MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD. An example
76 is the H8/300(H) compiler. */
78 #ifndef MAX_BITS_PER_WORD
79 #define MAX_BITS_PER_WORD BITS_PER_WORD
80 #endif
82 /* Reduce conditional compilation elsewhere. */
83 #ifndef HAVE_insv
84 #define HAVE_insv 0
85 #define CODE_FOR_insv CODE_FOR_nothing
86 #define gen_insv(a,b,c,d) NULL_RTX
87 #endif
88 #ifndef HAVE_extv
89 #define HAVE_extv 0
90 #define CODE_FOR_extv CODE_FOR_nothing
91 #define gen_extv(a,b,c,d) NULL_RTX
92 #endif
93 #ifndef HAVE_extzv
94 #define HAVE_extzv 0
95 #define CODE_FOR_extzv CODE_FOR_nothing
96 #define gen_extzv(a,b,c,d) NULL_RTX
97 #endif
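/* With the stubs above, a target that lacks (say) the insv pattern still
   compiles the code further down that does

     if (HAVE_insv && ...)
       pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);

   the guard folds to "if (0 && ...)", the call becomes dead code, and the
   NULL_RTX expansion of gen_insv exists only to keep the call well formed.  */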
99 /* Cost of various pieces of RTL. Note that some of these are indexed by
100 shift count and some by mode. */
101 static int zero_cost[2];
102 static int add_cost[2][NUM_MACHINE_MODES];
103 static int neg_cost[2][NUM_MACHINE_MODES];
104 static int shift_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
105 static int shiftadd_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
106 static int shiftsub_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
107 static int mul_cost[2][NUM_MACHINE_MODES];
108 static int sdiv_cost[2][NUM_MACHINE_MODES];
109 static int udiv_cost[2][NUM_MACHINE_MODES];
110 static int mul_widen_cost[2][NUM_MACHINE_MODES];
111 static int mul_highpart_cost[2][NUM_MACHINE_MODES];
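/* The leading [2] dimension of each table is the speed/size axis that
   init_expmed loops over below (crtl->maybe_hot_insn_p set to 0 and 1):
   element [0] holds the cost used when optimizing for size, element [1]
   the cost used when optimizing for speed.  A consumer picks that index
   from the current optimization goal and then indexes normally, e.g.

     shift_cost[speed][mode][count]

   for the cost of a shift by COUNT in MODE.  */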
113 void
114 init_expmed (void)
116 struct
118 struct rtx_def reg; rtunion reg_fld[2];
119 struct rtx_def plus; rtunion plus_fld1;
120 struct rtx_def neg;
121 struct rtx_def mult; rtunion mult_fld1;
122 struct rtx_def sdiv; rtunion sdiv_fld1;
123 struct rtx_def udiv; rtunion udiv_fld1;
124 struct rtx_def zext;
125 struct rtx_def sdiv_32; rtunion sdiv_32_fld1;
126 struct rtx_def smod_32; rtunion smod_32_fld1;
127 struct rtx_def wide_mult; rtunion wide_mult_fld1;
128 struct rtx_def wide_lshr; rtunion wide_lshr_fld1;
129 struct rtx_def wide_trunc;
130 struct rtx_def shift; rtunion shift_fld1;
131 struct rtx_def shift_mult; rtunion shift_mult_fld1;
132 struct rtx_def shift_add; rtunion shift_add_fld1;
133 struct rtx_def shift_sub; rtunion shift_sub_fld1;
134 } all;
136 rtx pow2[MAX_BITS_PER_WORD];
137 rtx cint[MAX_BITS_PER_WORD];
138 int m, n;
139 enum machine_mode mode, wider_mode;
140 int speed;
143 for (m = 1; m < MAX_BITS_PER_WORD; m++)
145 pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
146 cint[m] = GEN_INT (m);
148 memset (&all, 0, sizeof all);
150 PUT_CODE (&all.reg, REG);
151 /* Avoid using hard regs in ways which may be unsupported. */
152 SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);
154 PUT_CODE (&all.plus, PLUS);
155 XEXP (&all.plus, 0) = &all.reg;
156 XEXP (&all.plus, 1) = &all.reg;
158 PUT_CODE (&all.neg, NEG);
159 XEXP (&all.neg, 0) = &all.reg;
161 PUT_CODE (&all.mult, MULT);
162 XEXP (&all.mult, 0) = &all.reg;
163 XEXP (&all.mult, 1) = &all.reg;
165 PUT_CODE (&all.sdiv, DIV);
166 XEXP (&all.sdiv, 0) = &all.reg;
167 XEXP (&all.sdiv, 1) = &all.reg;
169 PUT_CODE (&all.udiv, UDIV);
170 XEXP (&all.udiv, 0) = &all.reg;
171 XEXP (&all.udiv, 1) = &all.reg;
173 PUT_CODE (&all.sdiv_32, DIV);
174 XEXP (&all.sdiv_32, 0) = &all.reg;
175 XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);
177 PUT_CODE (&all.smod_32, MOD);
178 XEXP (&all.smod_32, 0) = &all.reg;
179 XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);
181 PUT_CODE (&all.zext, ZERO_EXTEND);
182 XEXP (&all.zext, 0) = &all.reg;
184 PUT_CODE (&all.wide_mult, MULT);
185 XEXP (&all.wide_mult, 0) = &all.zext;
186 XEXP (&all.wide_mult, 1) = &all.zext;
188 PUT_CODE (&all.wide_lshr, LSHIFTRT);
189 XEXP (&all.wide_lshr, 0) = &all.wide_mult;
191 PUT_CODE (&all.wide_trunc, TRUNCATE);
192 XEXP (&all.wide_trunc, 0) = &all.wide_lshr;
194 PUT_CODE (&all.shift, ASHIFT);
195 XEXP (&all.shift, 0) = &all.reg;
197 PUT_CODE (&all.shift_mult, MULT);
198 XEXP (&all.shift_mult, 0) = &all.reg;
200 PUT_CODE (&all.shift_add, PLUS);
201 XEXP (&all.shift_add, 0) = &all.shift_mult;
202 XEXP (&all.shift_add, 1) = &all.reg;
204 PUT_CODE (&all.shift_sub, MINUS);
205 XEXP (&all.shift_sub, 0) = &all.shift_mult;
206 XEXP (&all.shift_sub, 1) = &all.reg;
208 for (speed = 0; speed < 2; speed++)
210 crtl->maybe_hot_insn_p = speed;
211 zero_cost[speed] = rtx_cost (const0_rtx, 0, speed);
213 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
214 mode != VOIDmode;
215 mode = GET_MODE_WIDER_MODE (mode))
217 PUT_MODE (&all.reg, mode);
218 PUT_MODE (&all.plus, mode);
219 PUT_MODE (&all.neg, mode);
220 PUT_MODE (&all.mult, mode);
221 PUT_MODE (&all.sdiv, mode);
222 PUT_MODE (&all.udiv, mode);
223 PUT_MODE (&all.sdiv_32, mode);
224 PUT_MODE (&all.smod_32, mode);
225 PUT_MODE (&all.wide_trunc, mode);
226 PUT_MODE (&all.shift, mode);
227 PUT_MODE (&all.shift_mult, mode);
228 PUT_MODE (&all.shift_add, mode);
229 PUT_MODE (&all.shift_sub, mode);
231 add_cost[speed][mode] = rtx_cost (&all.plus, SET, speed);
232 neg_cost[speed][mode] = rtx_cost (&all.neg, SET, speed);
233 mul_cost[speed][mode] = rtx_cost (&all.mult, SET, speed);
234 sdiv_cost[speed][mode] = rtx_cost (&all.sdiv, SET, speed);
235 udiv_cost[speed][mode] = rtx_cost (&all.udiv, SET, speed);
237 sdiv_pow2_cheap[speed][mode] = (rtx_cost (&all.sdiv_32, SET, speed)
238 <= 2 * add_cost[speed][mode]);
239 smod_pow2_cheap[speed][mode] = (rtx_cost (&all.smod_32, SET, speed)
240 <= 4 * add_cost[speed][mode]);
242 wider_mode = GET_MODE_WIDER_MODE (mode);
243 if (wider_mode != VOIDmode)
245 PUT_MODE (&all.zext, wider_mode);
246 PUT_MODE (&all.wide_mult, wider_mode);
247 PUT_MODE (&all.wide_lshr, wider_mode);
248 XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));
250 mul_widen_cost[speed][wider_mode]
251 = rtx_cost (&all.wide_mult, SET, speed);
252 mul_highpart_cost[speed][mode]
253 = rtx_cost (&all.wide_trunc, SET, speed);
256 shift_cost[speed][mode][0] = 0;
257 shiftadd_cost[speed][mode][0] = shiftsub_cost[speed][mode][0]
258 = add_cost[speed][mode];
260 n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
261 for (m = 1; m < n; m++)
263 XEXP (&all.shift, 1) = cint[m];
264 XEXP (&all.shift_mult, 1) = pow2[m];
266 shift_cost[speed][mode][m] = rtx_cost (&all.shift, SET, speed);
267 shiftadd_cost[speed][mode][m] = rtx_cost (&all.shift_add, SET, speed);
268 shiftsub_cost[speed][mode][m] = rtx_cost (&all.shift_sub, SET, speed);
272 default_rtl_profile ();
275 /* Return an rtx representing minus the value of X.
276 MODE is the intended mode of the result,
277 useful if X is a CONST_INT. */
 279 rtx
 280 negate_rtx (enum machine_mode mode, rtx x)
282 rtx result = simplify_unary_operation (NEG, mode, x, mode);
284 if (result == 0)
285 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
287 return result;
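/* For example, negate_rtx (SImode, GEN_INT (5)) folds directly to
   GEN_INT (-5) via simplify_unary_operation, whereas negating a pseudo
   register finds nothing to fold and falls through to expand_unop, which
   emits an actual negation (or equivalent) instruction sequence.  */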
290 /* Report on the availability of insv/extv/extzv and the desired mode
291 of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo
292 is false; else the mode of the specified operand. If OPNO is -1,
293 all the caller cares about is whether the insn is available. */
294 enum machine_mode
295 mode_for_extraction (enum extraction_pattern pattern, int opno)
297 const struct insn_data *data;
299 switch (pattern)
301 case EP_insv:
302 if (HAVE_insv)
304 data = &insn_data[CODE_FOR_insv];
305 break;
307 return MAX_MACHINE_MODE;
309 case EP_extv:
310 if (HAVE_extv)
312 data = &insn_data[CODE_FOR_extv];
313 break;
315 return MAX_MACHINE_MODE;
317 case EP_extzv:
318 if (HAVE_extzv)
320 data = &insn_data[CODE_FOR_extzv];
321 break;
323 return MAX_MACHINE_MODE;
325 default:
326 gcc_unreachable ();
329 if (opno == -1)
330 return VOIDmode;
332 /* Everyone who uses this function used to follow it with
333 if (result == VOIDmode) result = word_mode; */
334 if (data->operand[opno].mode == VOIDmode)
335 return word_mode;
336 return data->operand[opno].mode;
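/* Typical uses in this file, for reference:

     mode_for_extraction (EP_insv, 3)    mode the insv pattern wants for the
                                         value being inserted (operand 3), or
                                         MAX_MACHINE_MODE if there is no insv;
     mode_for_extraction (EP_extzv, 0)   mode of the zero-extraction result.

   Callers test the result against MAX_MACHINE_MODE before committing to the
   pattern, and a VOIDmode operand is reported as word_mode.  */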
339 /* Return true if X, of mode MODE, matches the predicate for operand
340 OPNO of instruction ICODE. Allow volatile memories, regardless of
341 the ambient volatile_ok setting. */
343 static bool
344 check_predicate_volatile_ok (enum insn_code icode, int opno,
345 rtx x, enum machine_mode mode)
347 bool save_volatile_ok, result;
349 save_volatile_ok = volatile_ok;
 volatile_ok = true;
 350 result = insn_data[(int) icode].operand[opno].predicate (x, mode);
351 volatile_ok = save_volatile_ok;
352 return result;
355 /* A subroutine of store_bit_field, with the same arguments. Return true
356 if the operation could be implemented.
358 If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
359 no other way of implementing the operation. If FALLBACK_P is false,
360 return false instead. */
362 static bool
363 store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
364 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
365 rtx value, bool fallback_p)
367 unsigned int unit
368 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
369 unsigned HOST_WIDE_INT offset, bitpos;
370 rtx op0 = str_rtx;
371 int byte_offset;
372 rtx orig_value;
374 enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
376 while (GET_CODE (op0) == SUBREG)
378 /* The following line once was done only if WORDS_BIG_ENDIAN,
379 but I think that is a mistake. WORDS_BIG_ENDIAN is
380 meaningful at a much higher level; when structures are copied
381 between memory and regs, the higher-numbered regs
382 always get higher addresses. */
383 int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
384 int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
386 byte_offset = 0;
388 /* Paradoxical subregs need special handling on big endian machines. */
389 if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
391 int difference = inner_mode_size - outer_mode_size;
393 if (WORDS_BIG_ENDIAN)
394 byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
395 if (BYTES_BIG_ENDIAN)
396 byte_offset += difference % UNITS_PER_WORD;
398 else
399 byte_offset = SUBREG_BYTE (op0);
401 bitnum += byte_offset * BITS_PER_UNIT;
402 op0 = SUBREG_REG (op0);
405 /* No action is needed if the target is a register and if the field
406 lies completely outside that register. This can occur if the source
407 code contains an out-of-bounds access to a small array. */
408 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
409 return true;
411 /* Use vec_set patterns for inserting parts of vectors whenever
412 available. */
413 if (VECTOR_MODE_P (GET_MODE (op0))
414 && !MEM_P (op0)
415 && (optab_handler (vec_set_optab, GET_MODE (op0))->insn_code
416 != CODE_FOR_nothing)
417 && fieldmode == GET_MODE_INNER (GET_MODE (op0))
418 && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
419 && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
421 enum machine_mode outermode = GET_MODE (op0);
422 enum machine_mode innermode = GET_MODE_INNER (outermode);
423 int icode = (int) optab_handler (vec_set_optab, outermode)->insn_code;
424 int pos = bitnum / GET_MODE_BITSIZE (innermode);
425 rtx rtxpos = GEN_INT (pos);
426 rtx src = value;
427 rtx dest = op0;
428 rtx pat, seq;
429 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
430 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
431 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
433 start_sequence ();
435 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
436 src = copy_to_mode_reg (mode1, src);
438 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
 439 rtxpos = copy_to_mode_reg (mode2, rtxpos);
441 /* We could handle this, but we should always be called with a pseudo
442 for our targets and all insns should take them as outputs. */
443 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
444 && (*insn_data[icode].operand[1].predicate) (src, mode1)
445 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
446 pat = GEN_FCN (icode) (dest, src, rtxpos);
447 seq = get_insns ();
448 end_sequence ();
449 if (pat)
451 emit_insn (seq);
452 emit_insn (pat);
453 return true;
457 /* If the target is a register, overwriting the entire object, or storing
458 a full-word or multi-word field can be done with just a SUBREG.
460 If the target is memory, storing any naturally aligned field can be
461 done with a simple store. For targets that support fast unaligned
462 memory, any naturally sized, unit aligned field can be done directly. */
464 offset = bitnum / unit;
465 bitpos = bitnum % unit;
466 byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
467 + (offset * UNITS_PER_WORD);
469 if (bitpos == 0
470 && bitsize == GET_MODE_BITSIZE (fieldmode)
471 && (!MEM_P (op0)
472 ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
473 || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
474 && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
475 : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
476 || (offset * BITS_PER_UNIT % bitsize == 0
477 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
479 if (MEM_P (op0))
480 op0 = adjust_address (op0, fieldmode, offset);
481 else if (GET_MODE (op0) != fieldmode)
482 op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
483 byte_offset);
484 emit_move_insn (op0, value);
485 return true;
488 /* Make sure we are playing with integral modes. Pun with subregs
489 if we aren't. This must come after the entire register case above,
490 since that case is valid for any mode. The following cases are only
491 valid for integral modes. */
493 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
494 if (imode != GET_MODE (op0))
496 if (MEM_P (op0))
497 op0 = adjust_address (op0, imode, 0);
498 else
500 gcc_assert (imode != BLKmode);
501 op0 = gen_lowpart (imode, op0);
506 /* We may be accessing data outside the field, which means
507 we can alias adjacent data. */
508 if (MEM_P (op0))
510 op0 = shallow_copy_rtx (op0);
511 set_mem_alias_set (op0, 0);
512 set_mem_expr (op0, 0);
515 /* If OP0 is a register, BITPOS must count within a word.
516 But as we have it, it counts within whatever size OP0 now has.
517 On a bigendian machine, these are not the same, so convert. */
518 if (BYTES_BIG_ENDIAN
519 && !MEM_P (op0)
520 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
521 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
523 /* Storing an lsb-aligned field in a register
524 can be done with a movestrict instruction. */
526 if (!MEM_P (op0)
527 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
528 && bitsize == GET_MODE_BITSIZE (fieldmode)
529 && (optab_handler (movstrict_optab, fieldmode)->insn_code
530 != CODE_FOR_nothing))
532 int icode = optab_handler (movstrict_optab, fieldmode)->insn_code;
533 rtx insn;
534 rtx start = get_last_insn ();
536 /* Get appropriate low part of the value being stored. */
537 if (GET_CODE (value) == CONST_INT || REG_P (value))
538 value = gen_lowpart (fieldmode, value);
539 else if (!(GET_CODE (value) == SYMBOL_REF
540 || GET_CODE (value) == LABEL_REF
541 || GET_CODE (value) == CONST))
542 value = convert_to_mode (fieldmode, value, 0);
544 if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
545 value = copy_to_mode_reg (fieldmode, value);
547 if (GET_CODE (op0) == SUBREG)
549 /* Else we've got some float mode source being extracted into
550 a different float mode destination -- this combination of
551 subregs results in Severe Tire Damage. */
552 gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
553 || GET_MODE_CLASS (fieldmode) == MODE_INT
554 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
555 op0 = SUBREG_REG (op0);
558 insn = (GEN_FCN (icode)
559 (gen_rtx_SUBREG (fieldmode, op0,
560 (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
561 + (offset * UNITS_PER_WORD)),
562 value));
563 if (insn)
565 emit_insn (insn);
566 return true;
568 delete_insns_since (start);
571 /* Handle fields bigger than a word. */
573 if (bitsize > BITS_PER_WORD)
575 /* Here we transfer the words of the field
576 in the order least significant first.
577 This is because the most significant word is the one which may
578 be less than full.
579 However, only do that if the value is not BLKmode. */
581 unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
582 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
583 unsigned int i;
584 rtx last;
586 /* This is the mode we must force value to, so that there will be enough
587 subwords to extract. Note that fieldmode will often (always?) be
588 VOIDmode, because that is what store_field uses to indicate that this
589 is a bit field, but passing VOIDmode to operand_subword_force
590 is not allowed. */
591 fieldmode = GET_MODE (value);
592 if (fieldmode == VOIDmode)
593 fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
595 last = get_last_insn ();
596 for (i = 0; i < nwords; i++)
598 /* If I is 0, use the low-order word in both field and target;
599 if I is 1, use the next to lowest word; and so on. */
600 unsigned int wordnum = (backwards ? nwords - i - 1 : i);
601 unsigned int bit_offset = (backwards
 602 ? MAX ((int) bitsize - ((int) i + 1)
 603 * BITS_PER_WORD,
 604 0)
 605 : (int) i * BITS_PER_WORD);
606 rtx value_word = operand_subword_force (value, wordnum, fieldmode);
608 if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
609 bitsize - i * BITS_PER_WORD),
610 bitnum + bit_offset, word_mode,
611 value_word, fallback_p))
613 delete_insns_since (last);
614 return false;
617 return true;
 620 /* From here on we can assume that the field fits within a single word
 621 (whatever mode that turns out to be), since it is shorter than a word. */
623 /* OFFSET is the number of words or bytes (UNIT says which)
624 from STR_RTX to the first word or byte containing part of the field. */
626 if (!MEM_P (op0))
628 if (offset != 0
629 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
631 if (!REG_P (op0))
633 /* Since this is a destination (lvalue), we can't copy
634 it to a pseudo. We can remove a SUBREG that does not
635 change the size of the operand. Such a SUBREG may
636 have been added above. */
637 gcc_assert (GET_CODE (op0) == SUBREG
638 && (GET_MODE_SIZE (GET_MODE (op0))
639 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
640 op0 = SUBREG_REG (op0);
642 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
643 op0, (offset * UNITS_PER_WORD));
645 offset = 0;
648 /* If VALUE has a floating-point or complex mode, access it as an
649 integer of the corresponding size. This can occur on a machine
650 with 64 bit registers that uses SFmode for float. It can also
651 occur for unaligned float or complex fields. */
652 orig_value = value;
653 if (GET_MODE (value) != VOIDmode
654 && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
655 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
657 value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
658 emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
661 /* Now OFFSET is nonzero only if OP0 is memory
662 and is therefore always measured in bytes. */
664 if (HAVE_insv
665 && GET_MODE (value) != BLKmode
666 && bitsize > 0
667 && GET_MODE_BITSIZE (op_mode) >= bitsize
668 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
669 && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
670 && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
671 VOIDmode)
672 && check_predicate_volatile_ok (CODE_FOR_insv, 0, op0, VOIDmode))
674 int xbitpos = bitpos;
675 rtx value1;
676 rtx xop0 = op0;
677 rtx last = get_last_insn ();
678 rtx pat;
680 /* Add OFFSET into OP0's address. */
681 if (MEM_P (xop0))
682 xop0 = adjust_address (xop0, byte_mode, offset);
684 /* If xop0 is a register, we need it in OP_MODE
685 to make it acceptable to the format of insv. */
686 if (GET_CODE (xop0) == SUBREG)
687 /* We can't just change the mode, because this might clobber op0,
688 and we will need the original value of op0 if insv fails. */
689 xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
690 if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
691 xop0 = gen_rtx_SUBREG (op_mode, xop0, 0);
693 /* On big-endian machines, we count bits from the most significant.
694 If the bit field insn does not, we must invert. */
696 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
697 xbitpos = unit - bitsize - xbitpos;
699 /* We have been counting XBITPOS within UNIT.
700 Count instead within the size of the register. */
701 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
702 xbitpos += GET_MODE_BITSIZE (op_mode) - unit;
704 unit = GET_MODE_BITSIZE (op_mode);
706 /* Convert VALUE to op_mode (which insv insn wants) in VALUE1. */
707 value1 = value;
708 if (GET_MODE (value) != op_mode)
710 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
712 /* Optimization: Don't bother really extending VALUE
713 if it has all the bits we will actually use. However,
714 if we must narrow it, be sure we do it correctly. */
716 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
718 rtx tmp;
720 tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
721 if (! tmp)
722 tmp = simplify_gen_subreg (op_mode,
723 force_reg (GET_MODE (value),
724 value1),
725 GET_MODE (value), 0);
726 value1 = tmp;
728 else
729 value1 = gen_lowpart (op_mode, value1);
731 else if (GET_CODE (value) == CONST_INT)
732 value1 = gen_int_mode (INTVAL (value), op_mode);
733 else
734 /* Parse phase is supposed to make VALUE's data type
735 match that of the component reference, which is a type
736 at least as wide as the field; so VALUE should have
737 a mode that corresponds to that type. */
738 gcc_assert (CONSTANT_P (value));
741 /* If this machine's insv insists on a register,
742 get VALUE1 into a register. */
743 if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
744 (value1, op_mode)))
745 value1 = force_reg (op_mode, value1);
747 pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
748 if (pat)
750 emit_insn (pat);
751 return true;
753 delete_insns_since (last);
756 /* If OP0 is a memory, try copying it to a register and seeing if a
757 cheap register alternative is available. */
758 if (HAVE_insv && MEM_P (op0))
760 enum machine_mode bestmode;
762 /* Get the mode to use for inserting into this field. If OP0 is
763 BLKmode, get the smallest mode consistent with the alignment. If
764 OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
765 mode. Otherwise, use the smallest mode containing the field. */
767 if (GET_MODE (op0) == BLKmode
768 || (op_mode != MAX_MACHINE_MODE
769 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
770 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
771 (op_mode == MAX_MACHINE_MODE
772 ? VOIDmode : op_mode),
773 MEM_VOLATILE_P (op0));
774 else
775 bestmode = GET_MODE (op0);
777 if (bestmode != VOIDmode
778 && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
779 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
780 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
782 rtx last, tempreg, xop0;
783 unsigned HOST_WIDE_INT xoffset, xbitpos;
785 last = get_last_insn ();
787 /* Adjust address to point to the containing unit of
788 that mode. Compute the offset as a multiple of this unit,
789 counting in bytes. */
790 unit = GET_MODE_BITSIZE (bestmode);
791 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
792 xbitpos = bitnum % unit;
793 xop0 = adjust_address (op0, bestmode, xoffset);
795 /* Fetch that unit, store the bitfield in it, then store
796 the unit. */
797 tempreg = copy_to_reg (xop0);
798 if (store_bit_field_1 (tempreg, bitsize, xbitpos,
799 fieldmode, orig_value, false))
801 emit_move_insn (xop0, tempreg);
802 return true;
804 delete_insns_since (last);
808 if (!fallback_p)
809 return false;
811 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
812 return true;
815 /* Generate code to store value from rtx VALUE
816 into a bit-field within structure STR_RTX
817 containing BITSIZE bits starting at bit BITNUM.
818 FIELDMODE is the machine-mode of the FIELD_DECL node for this field. */
820 void
821 store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
822 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
823 rtx value)
825 if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true))
826 gcc_unreachable ();
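/* As a usage sketch: storing VALUE into a 3-bit field that starts at bit 5
   of OP0 would be requested as

     store_bit_field (op0, 3, 5, fieldmode, value);

   with FIELDMODE taken from the FIELD_DECL as described above.  Because the
   call above passes FALLBACK_P as true, store_bit_field_1 can always fall
   back on store_fixed_bit_field, so a false return is impossible and is
   treated as an internal error.  */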
829 /* Use shifts and boolean operations to store VALUE
830 into a bit field of width BITSIZE
831 in a memory location specified by OP0 except offset by OFFSET bytes.
832 (OFFSET must be 0 if OP0 is a register.)
833 The field starts at position BITPOS within the byte.
834 (If OP0 is a register, it may be a full word or a narrower mode,
835 but BITPOS still counts within a full word,
836 which is significant on bigendian machines.) */
838 static void
839 store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
840 unsigned HOST_WIDE_INT bitsize,
841 unsigned HOST_WIDE_INT bitpos, rtx value)
843 enum machine_mode mode;
844 unsigned int total_bits = BITS_PER_WORD;
845 rtx temp;
846 int all_zero = 0;
847 int all_one = 0;
849 /* There is a case not handled here:
850 a structure with a known alignment of just a halfword
851 and a field split across two aligned halfwords within the structure.
852 Or likewise a structure with a known alignment of just a byte
853 and a field split across two bytes.
854 Such cases are not supposed to be able to occur. */
856 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
858 gcc_assert (!offset);
859 /* Special treatment for a bit field split across two registers. */
860 if (bitsize + bitpos > BITS_PER_WORD)
862 store_split_bit_field (op0, bitsize, bitpos, value);
863 return;
866 else
868 /* Get the proper mode to use for this field. We want a mode that
869 includes the entire field. If such a mode would be larger than
870 a word, we won't be doing the extraction the normal way.
871 We don't want a mode bigger than the destination. */
873 mode = GET_MODE (op0);
874 if (GET_MODE_BITSIZE (mode) == 0
875 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
876 mode = word_mode;
877 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
878 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
880 if (mode == VOIDmode)
882 /* The only way this should occur is if the field spans word
883 boundaries. */
884 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
885 value);
886 return;
889 total_bits = GET_MODE_BITSIZE (mode);
891 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
892 be in the range 0 to total_bits-1, and put any excess bytes in
893 OFFSET. */
894 if (bitpos >= total_bits)
896 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
897 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
898 * BITS_PER_UNIT);
901 /* Get ref to an aligned byte, halfword, or word containing the field.
902 Adjust BITPOS to be position within a word,
903 and OFFSET to be the offset of that word.
904 Then alter OP0 to refer to that word. */
905 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
906 offset -= (offset % (total_bits / BITS_PER_UNIT));
907 op0 = adjust_address (op0, mode, offset);
910 mode = GET_MODE (op0);
912 /* Now MODE is either some integral mode for a MEM as OP0,
913 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
914 The bit field is contained entirely within OP0.
915 BITPOS is the starting bit number within OP0.
916 (OP0's mode may actually be narrower than MODE.) */
918 if (BYTES_BIG_ENDIAN)
919 /* BITPOS is the distance between our msb
920 and that of the containing datum.
921 Convert it to the distance from the lsb. */
922 bitpos = total_bits - bitsize - bitpos;
924 /* Now BITPOS is always the distance between our lsb
925 and that of OP0. */
927 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
928 we must first convert its mode to MODE. */
930 if (GET_CODE (value) == CONST_INT)
932 HOST_WIDE_INT v = INTVAL (value);
934 if (bitsize < HOST_BITS_PER_WIDE_INT)
935 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
937 if (v == 0)
938 all_zero = 1;
939 else if ((bitsize < HOST_BITS_PER_WIDE_INT
940 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
941 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
942 all_one = 1;
944 value = lshift_value (mode, value, bitpos, bitsize);
946 else
948 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
949 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
951 if (GET_MODE (value) != mode)
952 value = convert_to_mode (mode, value, 1);
954 if (must_and)
955 value = expand_binop (mode, and_optab, value,
956 mask_rtx (mode, 0, bitsize, 0),
957 NULL_RTX, 1, OPTAB_LIB_WIDEN);
958 if (bitpos > 0)
959 value = expand_shift (LSHIFT_EXPR, mode, value,
960 build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
963 /* Now clear the chosen bits in OP0,
964 except that if VALUE is -1 we need not bother. */
965 /* We keep the intermediates in registers to allow CSE to combine
966 consecutive bitfield assignments. */
968 temp = force_reg (mode, op0);
970 if (! all_one)
972 temp = expand_binop (mode, and_optab, temp,
973 mask_rtx (mode, bitpos, bitsize, 1),
974 NULL_RTX, 1, OPTAB_LIB_WIDEN);
975 temp = force_reg (mode, temp);
978 /* Now logical-or VALUE into OP0, unless it is zero. */
980 if (! all_zero)
982 temp = expand_binop (mode, ior_optab, temp, value,
983 NULL_RTX, 1, OPTAB_LIB_WIDEN);
984 temp = force_reg (mode, temp);
987 if (op0 != temp)
989 op0 = copy_rtx (op0);
990 emit_move_insn (op0, temp);
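/* A worked example of the sequence built above (numbers chosen purely for
   illustration): storing a 3-bit VALUE at BITPOS 4 of a byte-wide unit on a
   little-endian target amounts to

     mask = mask_rtx (QImode, 4, 3, 0)        = 0x70
     temp = op0 & mask_rtx (QImode, 4, 3, 1)    clear the old bits
     temp = temp | ((value & 0x7) << 4)         insert the new bits
     op0  = temp

   For a constant VALUE the shift and mask are folded at compile time, and
   the AND or the IOR is skipped entirely when the constant is all zeros or
   all ones (the all_zero / all_one flags above).  */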
994 /* Store a bit field that is split across multiple accessible memory objects.
996 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
997 BITSIZE is the field width; BITPOS the position of its first bit
998 (within the word).
999 VALUE is the value to store.
1001 This does not yet handle fields wider than BITS_PER_WORD. */
1003 static void
1004 store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1005 unsigned HOST_WIDE_INT bitpos, rtx value)
1007 unsigned int unit;
1008 unsigned int bitsdone = 0;
1010 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1011 much at a time. */
1012 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1013 unit = BITS_PER_WORD;
1014 else
1015 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1017 /* If VALUE is a constant other than a CONST_INT, get it into a register in
1018 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
1019 that VALUE might be a floating-point constant. */
1020 if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
1022 rtx word = gen_lowpart_common (word_mode, value);
1024 if (word && (value != word))
1025 value = word;
1026 else
1027 value = gen_lowpart_common (word_mode,
1028 force_reg (GET_MODE (value) != VOIDmode
1029 ? GET_MODE (value)
1030 : word_mode, value));
1033 while (bitsdone < bitsize)
1035 unsigned HOST_WIDE_INT thissize;
1036 rtx part, word;
1037 unsigned HOST_WIDE_INT thispos;
1038 unsigned HOST_WIDE_INT offset;
1040 offset = (bitpos + bitsdone) / unit;
1041 thispos = (bitpos + bitsdone) % unit;
1043 /* THISSIZE must not overrun a word boundary. Otherwise,
1044 store_fixed_bit_field will call us again, and we will mutually
1045 recurse forever. */
1046 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1047 thissize = MIN (thissize, unit - thispos);
1049 if (BYTES_BIG_ENDIAN)
1051 int total_bits;
1053 /* We must do an endian conversion exactly the same way as it is
1054 done in extract_bit_field, so that the two calls to
1055 extract_fixed_bit_field will have comparable arguments. */
1056 if (!MEM_P (value) || GET_MODE (value) == BLKmode)
1057 total_bits = BITS_PER_WORD;
1058 else
1059 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
1061 /* Fetch successively less significant portions. */
1062 if (GET_CODE (value) == CONST_INT)
1063 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1064 >> (bitsize - bitsdone - thissize))
1065 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1066 else
1067 /* The args are chosen so that the last part includes the
1068 lsb. Give extract_bit_field the value it needs (with
1069 endianness compensation) to fetch the piece we want. */
1070 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1071 total_bits - bitsize + bitsdone,
1072 NULL_RTX, 1);
1074 else
1076 /* Fetch successively more significant portions. */
1077 if (GET_CODE (value) == CONST_INT)
1078 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1079 >> bitsdone)
1080 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1081 else
1082 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1083 bitsdone, NULL_RTX, 1);
1086 /* If OP0 is a register, then handle OFFSET here.
1088 When handling multiword bitfields, extract_bit_field may pass
1089 down a word_mode SUBREG of a larger REG for a bitfield that actually
1090 crosses a word boundary. Thus, for a SUBREG, we must find
1091 the current word starting from the base register. */
1092 if (GET_CODE (op0) == SUBREG)
1094 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1095 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1096 GET_MODE (SUBREG_REG (op0)));
1097 offset = 0;
1099 else if (REG_P (op0))
1101 word = operand_subword_force (op0, offset, GET_MODE (op0));
1102 offset = 0;
1104 else
1105 word = op0;
1107 /* OFFSET is in UNITs, and UNIT is in bits.
1108 store_fixed_bit_field wants offset in bytes. */
1109 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
1110 thispos, part);
1111 bitsdone += thissize;
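/* Example of the splitting loop with BITS_PER_WORD == 32: a 10-bit field
   starting at bit 26 of a register is written as two pieces,

     pass 1:  offset 0, thispos 26, thissize 6    (bits 26..31 of word 0)
     pass 2:  offset 1, thispos 0,  thissize 4    (bits 0..3 of word 1)

   each piece going through store_fixed_bit_field on the word returned by
   operand_subword_force.  */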
1115 /* A subroutine of extract_bit_field_1 that converts return value X
1116 to either MODE or TMODE. MODE, TMODE and UNSIGNEDP are arguments
1117 to extract_bit_field. */
1119 static rtx
1120 convert_extracted_bit_field (rtx x, enum machine_mode mode,
1121 enum machine_mode tmode, bool unsignedp)
1123 if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
1124 return x;
1126 /* If the x mode is not a scalar integral, first convert to the
1127 integer mode of that size and then access it as a floating-point
1128 value via a SUBREG. */
1129 if (!SCALAR_INT_MODE_P (tmode))
1131 enum machine_mode smode;
1133 smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
1134 x = convert_to_mode (smode, x, unsignedp);
1135 x = force_reg (smode, x);
1136 return gen_lowpart (tmode, x);
1139 return convert_to_mode (tmode, x, unsignedp);
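/* For instance, if the bits were extracted in SImode but the caller asked
   for an SFmode result, the value is first converted to the integer mode of
   SFmode's width and then punned to SFmode with gen_lowpart; calling
   convert_to_mode on the float mode directly would perform a numeric
   int-to-float conversion and change the bit pattern.  */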
1142 /* A subroutine of extract_bit_field, with the same arguments.
1143 If FALLBACK_P is true, fall back to extract_fixed_bit_field
1144 if we can find no other means of implementing the operation.
 1145 If FALLBACK_P is false, return NULL instead. */
1147 static rtx
1148 extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1149 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1150 enum machine_mode mode, enum machine_mode tmode,
1151 bool fallback_p)
1153 unsigned int unit
1154 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
1155 unsigned HOST_WIDE_INT offset, bitpos;
1156 rtx op0 = str_rtx;
1157 enum machine_mode int_mode;
1158 enum machine_mode ext_mode;
1159 enum machine_mode mode1;
1160 enum insn_code icode;
1161 int byte_offset;
1163 if (tmode == VOIDmode)
1164 tmode = mode;
1166 while (GET_CODE (op0) == SUBREG)
1168 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1169 op0 = SUBREG_REG (op0);
1172 /* If we have an out-of-bounds access to a register, just return an
1173 uninitialized register of the required mode. This can occur if the
1174 source code contains an out-of-bounds access to a small array. */
1175 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
1176 return gen_reg_rtx (tmode);
1178 if (REG_P (op0)
1179 && mode == GET_MODE (op0)
1180 && bitnum == 0
1181 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1183 /* We're trying to extract a full register from itself. */
1184 return op0;
1187 /* See if we can get a better vector mode before extracting. */
1188 if (VECTOR_MODE_P (GET_MODE (op0))
1189 && !MEM_P (op0)
1190 && GET_MODE_INNER (GET_MODE (op0)) != tmode)
1192 enum machine_mode new_mode;
1193 int nunits = GET_MODE_NUNITS (GET_MODE (op0));
1195 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1196 new_mode = MIN_MODE_VECTOR_FLOAT;
1197 else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
1198 new_mode = MIN_MODE_VECTOR_FRACT;
1199 else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
1200 new_mode = MIN_MODE_VECTOR_UFRACT;
1201 else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
1202 new_mode = MIN_MODE_VECTOR_ACCUM;
1203 else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
1204 new_mode = MIN_MODE_VECTOR_UACCUM;
1205 else
1206 new_mode = MIN_MODE_VECTOR_INT;
1208 for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode))
1209 if (GET_MODE_NUNITS (new_mode) == nunits
1210 && GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
1211 && targetm.vector_mode_supported_p (new_mode))
1212 break;
1213 if (new_mode != VOIDmode)
1214 op0 = gen_lowpart (new_mode, op0);
1217 /* Use vec_extract patterns for extracting parts of vectors whenever
1218 available. */
1219 if (VECTOR_MODE_P (GET_MODE (op0))
1220 && !MEM_P (op0)
1221 && (optab_handler (vec_extract_optab, GET_MODE (op0))->insn_code
1222 != CODE_FOR_nothing)
1223 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
1224 == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
1226 enum machine_mode outermode = GET_MODE (op0);
1227 enum machine_mode innermode = GET_MODE_INNER (outermode);
1228 int icode = (int) optab_handler (vec_extract_optab, outermode)->insn_code;
1229 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1230 rtx rtxpos = GEN_INT (pos);
1231 rtx src = op0;
1232 rtx dest = NULL, pat, seq;
1233 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
1234 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
1235 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
1237 if (innermode == tmode || innermode == mode)
1238 dest = target;
1240 if (!dest)
1241 dest = gen_reg_rtx (innermode);
1243 start_sequence ();
1245 if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
1246 dest = copy_to_mode_reg (mode0, dest);
1248 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
1249 src = copy_to_mode_reg (mode1, src);
1251 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
 1252 rtxpos = copy_to_mode_reg (mode2, rtxpos);
1254 /* We could handle this, but we should always be called with a pseudo
1255 for our targets and all insns should take them as outputs. */
1256 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
1257 && (*insn_data[icode].operand[1].predicate) (src, mode1)
1258 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
1260 pat = GEN_FCN (icode) (dest, src, rtxpos);
1261 seq = get_insns ();
1262 end_sequence ();
1263 if (pat)
1265 emit_insn (seq);
1266 emit_insn (pat);
1267 if (mode0 != mode)
1268 return gen_lowpart (tmode, dest);
1269 return dest;
1273 /* Make sure we are playing with integral modes. Pun with subregs
1274 if we aren't. */
1276 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
1277 if (imode != GET_MODE (op0))
1279 if (MEM_P (op0))
1280 op0 = adjust_address (op0, imode, 0);
1281 else if (imode != BLKmode)
1283 op0 = gen_lowpart (imode, op0);
1285 /* If we got a SUBREG, force it into a register since we
1286 aren't going to be able to do another SUBREG on it. */
1287 if (GET_CODE (op0) == SUBREG)
1288 op0 = force_reg (imode, op0);
1290 else if (REG_P (op0))
1292 rtx reg, subreg;
1293 imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
1294 MODE_INT);
1295 reg = gen_reg_rtx (imode);
1296 subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
1297 emit_move_insn (subreg, op0);
1298 op0 = reg;
1299 bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
1301 else
1303 rtx mem = assign_stack_temp (GET_MODE (op0),
1304 GET_MODE_SIZE (GET_MODE (op0)), 0);
1305 emit_move_insn (mem, op0);
1306 op0 = adjust_address (mem, BLKmode, 0);
1311 /* We may be accessing data outside the field, which means
1312 we can alias adjacent data. */
1313 if (MEM_P (op0))
1315 op0 = shallow_copy_rtx (op0);
1316 set_mem_alias_set (op0, 0);
1317 set_mem_expr (op0, 0);
1320 /* Extraction of a full-word or multi-word value from a structure
1321 in a register or aligned memory can be done with just a SUBREG.
1322 A subword value in the least significant part of a register
1323 can also be extracted with a SUBREG. For this, we need the
1324 byte offset of the value in op0. */
1326 bitpos = bitnum % unit;
1327 offset = bitnum / unit;
1328 byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;
1330 /* If OP0 is a register, BITPOS must count within a word.
1331 But as we have it, it counts within whatever size OP0 now has.
1332 On a bigendian machine, these are not the same, so convert. */
1333 if (BYTES_BIG_ENDIAN
1334 && !MEM_P (op0)
1335 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
1336 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1338 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1339 If that's wrong, the solution is to test for it and set TARGET to 0
1340 if needed. */
1342 /* Only scalar integer modes can be converted via subregs. There is an
1343 additional problem for FP modes here in that they can have a precision
1344 which is different from the size. mode_for_size uses precision, but
1345 we want a mode based on the size, so we must avoid calling it for FP
1346 modes. */
1347 mode1 = (SCALAR_INT_MODE_P (tmode)
1348 ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
1349 : mode);
1351 if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
1352 && bitpos % BITS_PER_WORD == 0)
1353 || (mode1 != BLKmode
1354 /* ??? The big endian test here is wrong. This is correct
1355 if the value is in a register, and if mode_for_size is not
1356 the same mode as op0. This causes us to get unnecessarily
1357 inefficient code from the Thumb port when -mbig-endian. */
1358 && (BYTES_BIG_ENDIAN
1359 ? bitpos + bitsize == BITS_PER_WORD
1360 : bitpos == 0)))
1361 && ((!MEM_P (op0)
1362 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode1),
1363 GET_MODE_BITSIZE (GET_MODE (op0)))
1364 && GET_MODE_SIZE (mode1) != 0
1365 && byte_offset % GET_MODE_SIZE (mode1) == 0)
1366 || (MEM_P (op0)
1367 && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
1368 || (offset * BITS_PER_UNIT % bitsize == 0
1369 && MEM_ALIGN (op0) % bitsize == 0)))))
1371 if (MEM_P (op0))
1372 op0 = adjust_address (op0, mode1, offset);
1373 else if (mode1 != GET_MODE (op0))
1375 rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
1376 byte_offset);
1377 if (sub == NULL)
1378 goto no_subreg_mode_swap;
1379 op0 = sub;
1381 if (mode1 != mode)
1382 return convert_to_mode (tmode, op0, unsignedp);
1383 return op0;
1385 no_subreg_mode_swap:
1387 /* Handle fields bigger than a word. */
1389 if (bitsize > BITS_PER_WORD)
1391 /* Here we transfer the words of the field
1392 in the order least significant first.
1393 This is because the most significant word is the one which may
1394 be less than full. */
1396 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1397 unsigned int i;
1399 if (target == 0 || !REG_P (target))
1400 target = gen_reg_rtx (mode);
1402 /* Indicate for flow that the entire target reg is being set. */
1403 emit_clobber (target);
1405 for (i = 0; i < nwords; i++)
1407 /* If I is 0, use the low-order word in both field and target;
1408 if I is 1, use the next to lowest word; and so on. */
1409 /* Word number in TARGET to use. */
1410 unsigned int wordnum
1411 = (WORDS_BIG_ENDIAN
1412 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1413 : i);
1414 /* Offset from start of field in OP0. */
1415 unsigned int bit_offset = (WORDS_BIG_ENDIAN
1416 ? MAX (0, ((int) bitsize - ((int) i + 1)
1417 * (int) BITS_PER_WORD))
1418 : (int) i * BITS_PER_WORD);
1419 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1420 rtx result_part
1421 = extract_bit_field (op0, MIN (BITS_PER_WORD,
1422 bitsize - i * BITS_PER_WORD),
1423 bitnum + bit_offset, 1, target_part, mode,
1424 word_mode);
1426 gcc_assert (target_part);
1428 if (result_part != target_part)
1429 emit_move_insn (target_part, result_part);
1432 if (unsignedp)
1434 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1435 need to be zero'd out. */
1436 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1438 unsigned int i, total_words;
1440 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1441 for (i = nwords; i < total_words; i++)
1442 emit_move_insn
1443 (operand_subword (target,
1444 WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
1445 1, VOIDmode),
1446 const0_rtx);
1448 return target;
1451 /* Signed bit field: sign-extend with two arithmetic shifts. */
1452 target = expand_shift (LSHIFT_EXPR, mode, target,
1453 build_int_cst (NULL_TREE,
1454 GET_MODE_BITSIZE (mode) - bitsize),
1455 NULL_RTX, 0);
1456 return expand_shift (RSHIFT_EXPR, mode, target,
1457 build_int_cst (NULL_TREE,
1458 GET_MODE_BITSIZE (mode) - bitsize),
1459 NULL_RTX, 0);
1462 /* From here on we know the desired field is smaller than a word. */
1464 /* Check if there is a correspondingly-sized integer field, so we can
1465 safely extract it as one size of integer, if necessary; then
1466 truncate or extend to the size that is wanted; then use SUBREGs or
1467 convert_to_mode to get one of the modes we really wanted. */
1469 int_mode = int_mode_for_mode (tmode);
1470 if (int_mode == BLKmode)
1471 int_mode = int_mode_for_mode (mode);
1472 /* Should probably push op0 out to memory and then do a load. */
1473 gcc_assert (int_mode != BLKmode);
1475 /* OFFSET is the number of words or bytes (UNIT says which)
1476 from STR_RTX to the first word or byte containing part of the field. */
1477 if (!MEM_P (op0))
1479 if (offset != 0
1480 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1482 if (!REG_P (op0))
1483 op0 = copy_to_reg (op0);
1484 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
1485 op0, (offset * UNITS_PER_WORD));
1487 offset = 0;
1490 /* Now OFFSET is nonzero only for memory operands. */
1491 ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
1492 icode = unsignedp ? CODE_FOR_extzv : CODE_FOR_extv;
1493 if (ext_mode != MAX_MACHINE_MODE
1494 && bitsize > 0
1495 && GET_MODE_BITSIZE (ext_mode) >= bitsize
1496 /* If op0 is a register, we need it in EXT_MODE to make it
1497 acceptable to the format of ext(z)v. */
1498 && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
1499 && !((REG_P (op0) || GET_CODE (op0) == SUBREG)
1500 && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode)))
1501 && check_predicate_volatile_ok (icode, 1, op0, GET_MODE (op0)))
1503 unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
1504 rtx bitsize_rtx, bitpos_rtx;
1505 rtx last = get_last_insn ();
1506 rtx xop0 = op0;
1507 rtx xtarget = target;
1508 rtx xspec_target = target;
1509 rtx xspec_target_subreg = 0;
1510 rtx pat;
1512 /* If op0 is a register, we need it in EXT_MODE to make it
1513 acceptable to the format of ext(z)v. */
1514 if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
1515 xop0 = gen_rtx_SUBREG (ext_mode, xop0, 0);
1516 if (MEM_P (xop0))
1517 /* Get ref to first byte containing part of the field. */
1518 xop0 = adjust_address (xop0, byte_mode, xoffset);
1520 /* On big-endian machines, we count bits from the most significant.
1521 If the bit field insn does not, we must invert. */
1522 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1523 xbitpos = unit - bitsize - xbitpos;
1525 /* Now convert from counting within UNIT to counting in EXT_MODE. */
1526 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
1527 xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;
1529 unit = GET_MODE_BITSIZE (ext_mode);
1531 if (xtarget == 0)
1532 xtarget = xspec_target = gen_reg_rtx (tmode);
1534 if (GET_MODE (xtarget) != ext_mode)
1536 /* Don't use LHS paradoxical subreg if explicit truncation is needed
1537 between the mode of the extraction (word_mode) and the target
1538 mode. Instead, create a temporary and use convert_move to set
1539 the target. */
1540 if (REG_P (xtarget)
1541 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (xtarget)),
1542 GET_MODE_BITSIZE (ext_mode)))
1544 xtarget = gen_lowpart (ext_mode, xtarget);
1545 if (GET_MODE_SIZE (ext_mode)
1546 > GET_MODE_SIZE (GET_MODE (xspec_target)))
1547 xspec_target_subreg = xtarget;
1549 else
1550 xtarget = gen_reg_rtx (ext_mode);
1553 /* If this machine's ext(z)v insists on a register target,
1554 make sure we have one. */
1555 if (!insn_data[(int) icode].operand[0].predicate (xtarget, ext_mode))
1556 xtarget = gen_reg_rtx (ext_mode);
1558 bitsize_rtx = GEN_INT (bitsize);
1559 bitpos_rtx = GEN_INT (xbitpos);
1561 pat = (unsignedp
1562 ? gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx)
1563 : gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx));
1564 if (pat)
1566 emit_insn (pat);
1567 if (xtarget == xspec_target)
1568 return xtarget;
1569 if (xtarget == xspec_target_subreg)
1570 return xspec_target;
1571 return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
1573 delete_insns_since (last);
1576 /* If OP0 is a memory, try copying it to a register and seeing if a
1577 cheap register alternative is available. */
1578 if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
1580 enum machine_mode bestmode;
1582 /* Get the mode to use for inserting into this field. If
1583 OP0 is BLKmode, get the smallest mode consistent with the
1584 alignment. If OP0 is a non-BLKmode object that is no
1585 wider than EXT_MODE, use its mode. Otherwise, use the
1586 smallest mode containing the field. */
1588 if (GET_MODE (op0) == BLKmode
1589 || (ext_mode != MAX_MACHINE_MODE
1590 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
1591 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
1592 (ext_mode == MAX_MACHINE_MODE
1593 ? VOIDmode : ext_mode),
1594 MEM_VOLATILE_P (op0));
1595 else
1596 bestmode = GET_MODE (op0);
1598 if (bestmode != VOIDmode
1599 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
1600 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
1602 unsigned HOST_WIDE_INT xoffset, xbitpos;
1604 /* Compute the offset as a multiple of this unit,
1605 counting in bytes. */
1606 unit = GET_MODE_BITSIZE (bestmode);
1607 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1608 xbitpos = bitnum % unit;
1610 /* Make sure the register is big enough for the whole field. */
1611 if (xoffset * BITS_PER_UNIT + unit
1612 >= offset * BITS_PER_UNIT + bitsize)
1614 rtx last, result, xop0;
1616 last = get_last_insn ();
1618 /* Fetch it to a register in that size. */
1619 xop0 = adjust_address (op0, bestmode, xoffset);
1620 xop0 = force_reg (bestmode, xop0);
1621 result = extract_bit_field_1 (xop0, bitsize, xbitpos,
1622 unsignedp, target,
1623 mode, tmode, false);
1624 if (result)
1625 return result;
1627 delete_insns_since (last);
1632 if (!fallback_p)
1633 return NULL;
1635 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1636 bitpos, target, unsignedp);
1637 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
 1640 /* Generate code to extract a bit-field from STR_RTX
1641 containing BITSIZE bits, starting at BITNUM,
1642 and put it in TARGET if possible (if TARGET is nonzero).
1643 Regardless of TARGET, we return the rtx for where the value is placed.
1645 STR_RTX is the structure containing the byte (a REG or MEM).
1646 UNSIGNEDP is nonzero if this is an unsigned bit field.
1647 MODE is the natural mode of the field value once extracted.
1648 TMODE is the mode the caller would like the value to have;
1649 but the value may be returned with type MODE instead.
1651 If a TARGET is specified and we can store in it at no extra cost,
1652 we do so, and return TARGET.
1653 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1654 if they are equally easy. */
 1656 rtx
 1657 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1658 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1659 enum machine_mode mode, enum machine_mode tmode)
1661 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
1662 target, mode, tmode, true);
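/* As a usage sketch: fetching an unsigned 3-bit field that starts at bit 5
   of OP0 into an SImode value would be requested as

     x = extract_bit_field (op0, 3, 5, 1, NULL_RTX, SImode, SImode);

   and may be satisfied by extzv, by a plain SUBREG or aligned load, or by
   the shift-and-mask code in extract_fixed_bit_field, whichever applies.  */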
 1665 /* Extract a bit field using shifts and boolean operations.
1666 Returns an rtx to represent the value.
1667 OP0 addresses a register (word) or memory (byte).
1668 BITPOS says which bit within the word or byte the bit field starts in.
1669 OFFSET says how many bytes farther the bit field starts;
1670 it is 0 if OP0 is a register.
1671 BITSIZE says how many bits long the bit field is.
1672 (If OP0 is a register, it may be narrower than a full word,
1673 but BITPOS still counts within a full word,
1674 which is significant on bigendian machines.)
1676 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1677 If TARGET is nonzero, attempts to store the value there
1678 and return TARGET, but this is not guaranteed.
1679 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1681 static rtx
1682 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1683 unsigned HOST_WIDE_INT offset,
1684 unsigned HOST_WIDE_INT bitsize,
1685 unsigned HOST_WIDE_INT bitpos, rtx target,
1686 int unsignedp)
1688 unsigned int total_bits = BITS_PER_WORD;
1689 enum machine_mode mode;
1691 if (GET_CODE (op0) == SUBREG || REG_P (op0))
1693 /* Special treatment for a bit field split across two registers. */
1694 if (bitsize + bitpos > BITS_PER_WORD)
1695 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1697 else
1699 /* Get the proper mode to use for this field. We want a mode that
1700 includes the entire field. If such a mode would be larger than
1701 a word, we won't be doing the extraction the normal way. */
1703 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1704 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1706 if (mode == VOIDmode)
1707 /* The only way this should occur is if the field spans word
1708 boundaries. */
1709 return extract_split_bit_field (op0, bitsize,
1710 bitpos + offset * BITS_PER_UNIT,
1711 unsignedp);
1713 total_bits = GET_MODE_BITSIZE (mode);
1715 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1716 be in the range 0 to total_bits-1, and put any excess bytes in
1717 OFFSET. */
1718 if (bitpos >= total_bits)
1720 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1721 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1722 * BITS_PER_UNIT);
1725 /* Get ref to an aligned byte, halfword, or word containing the field.
1726 Adjust BITPOS to be position within a word,
1727 and OFFSET to be the offset of that word.
1728 Then alter OP0 to refer to that word. */
1729 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1730 offset -= (offset % (total_bits / BITS_PER_UNIT));
1731 op0 = adjust_address (op0, mode, offset);
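      /* Worked example (illustrative, not in the original source): with a
	 32-bit MODE and 8-bit units, total_bits / BITS_PER_UNIT == 4, so a
	 request for offset == 5, bitpos == 3 becomes offset == 4 and
	 bitpos == 3 + (5 % 4) * 8 == 11: the field starts at bit 11 of the
	 aligned word at byte offset 4 (5*8 + 3 == 4*8 + 11 == 43).  */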
1734 mode = GET_MODE (op0);
1736 if (BYTES_BIG_ENDIAN)
1737 /* BITPOS is the distance between our msb and that of OP0.
1738 Convert it to the distance from the lsb. */
1739 bitpos = total_bits - bitsize - bitpos;
1741 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1742 We have reduced the big-endian case to the little-endian case. */
1744 if (unsignedp)
1746 if (bitpos)
1748 /* If the field does not already start at the lsb,
1749 shift it so it does. */
1750 tree amount = build_int_cst (NULL_TREE, bitpos);
1751 /* Maybe propagate the target for the shift. */
1752 /* But not if we will return it--could confuse integrate.c. */
1753 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1754 if (tmode != mode) subtarget = 0;
1755 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1757 /* Convert the value to the desired mode. */
1758 if (mode != tmode)
1759 op0 = convert_to_mode (tmode, op0, 1);
1761 /* Unless the msb of the field used to be the msb when we shifted,
1762 mask out the upper bits. */
1764 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1765 return expand_binop (GET_MODE (op0), and_optab, op0,
1766 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1767 target, 1, OPTAB_LIB_WIDEN);
1768 return op0;
1771 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1772 then arithmetic-shift its lsb to the lsb of the word. */
1773 op0 = force_reg (mode, op0);
1774 if (mode != tmode)
1775 target = 0;
1777 /* Find the narrowest integer mode that contains the field. */
1779 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1780 mode = GET_MODE_WIDER_MODE (mode))
1781 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1783 op0 = convert_to_mode (mode, op0, 0);
1784 break;
1787 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1789 tree amount
1790 = build_int_cst (NULL_TREE,
1791 GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
1792 /* Maybe propagate the target for the shift. */
1793 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1794 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1797 return expand_shift (RSHIFT_EXPR, mode, op0,
1798 build_int_cst (NULL_TREE,
1799 GET_MODE_BITSIZE (mode) - bitsize),
1800 target, 0);
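/* Illustrative host-side sketch (not part of the original file) of the
   two-shift sign extraction above, specialised to a 32-bit int.  Assumes
   the usual two's-complement behavior, an arithmetic right shift for
   signed values, and 0 < bitsize && bitpos + bitsize <= 32.  */
static int
example_extract_signed_field (int word, int bitpos, int bitsize)
{
  /* Shift left so the field's msb becomes bit 31, then arithmetic-shift
     right so the field's lsb lands at bit 0, replicating the sign bit
     into the upper bits on the way down.  */
  return ((int) ((unsigned int) word << (32 - bitsize - bitpos)))
	 >> (32 - bitsize);
}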
1803 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1804 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1805 complement of that if COMPLEMENT. The mask is truncated if
1806 necessary to the width of mode MODE. The mask is zero-extended if
1807 BITSIZE+BITPOS is too small for MODE. */
1809 static rtx
1810 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1812 HOST_WIDE_INT masklow, maskhigh;
1814 if (bitsize == 0)
1815 masklow = 0;
1816 else if (bitpos < HOST_BITS_PER_WIDE_INT)
1817 masklow = (HOST_WIDE_INT) -1 << bitpos;
1818 else
1819 masklow = 0;
1821 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1822 masklow &= ((unsigned HOST_WIDE_INT) -1
1823 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1825 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1826 maskhigh = -1;
1827 else
1828 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1830 if (bitsize == 0)
1831 maskhigh = 0;
1832 else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1833 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1834 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1835 else
1836 maskhigh = 0;
1838 if (complement)
1840 maskhigh = ~maskhigh;
1841 masklow = ~masklow;
1844 return immed_double_const (masklow, maskhigh, mode);
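/* Worked example (illustrative, not in the original source): for a 32-bit
   MODE, mask_rtx (mode, 4, 8, 0) describes eight ones followed by four
   zeros, i.e. 0x00000ff0, and with COMPLEMENT nonzero it would describe
   0xfffff00f instead.  */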
1847 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1848 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1850 static rtx
1851 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1853 unsigned HOST_WIDE_INT v = INTVAL (value);
1854 HOST_WIDE_INT low, high;
1856 if (bitsize < HOST_BITS_PER_WIDE_INT)
1857 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1859 if (bitpos < HOST_BITS_PER_WIDE_INT)
1861 low = v << bitpos;
1862 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1864 else
1866 low = 0;
1867 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1870 return immed_double_const (low, high, mode);
1873 /* Extract a bit field that is split across two words
1874 and return an RTX for the result.
1876 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1877 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1878 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1880 static rtx
1881 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1882 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1884 unsigned int unit;
1885 unsigned int bitsdone = 0;
1886 rtx result = NULL_RTX;
1887 int first = 1;
1889 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
1890 much at a time. */
1891 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1892 unit = BITS_PER_WORD;
1893 else
1894 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1896 while (bitsdone < bitsize)
1898 unsigned HOST_WIDE_INT thissize;
1899 rtx part, word;
1900 unsigned HOST_WIDE_INT thispos;
1901 unsigned HOST_WIDE_INT offset;
1903 offset = (bitpos + bitsdone) / unit;
1904 thispos = (bitpos + bitsdone) % unit;
1906 /* THISSIZE must not overrun a word boundary. Otherwise,
1907 extract_fixed_bit_field will call us again, and we will mutually
1908 recurse forever. */
1909 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1910 thissize = MIN (thissize, unit - thispos);
1912 /* If OP0 is a register, then handle OFFSET here.
1914 When handling multiword bitfields, extract_bit_field may pass
1915 down a word_mode SUBREG of a larger REG for a bitfield that actually
1916 crosses a word boundary. Thus, for a SUBREG, we must find
1917 the current word starting from the base register. */
1918 if (GET_CODE (op0) == SUBREG)
1920 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1921 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1922 GET_MODE (SUBREG_REG (op0)));
1923 offset = 0;
1925 else if (REG_P (op0))
1927 word = operand_subword_force (op0, offset, GET_MODE (op0));
1928 offset = 0;
1930 else
1931 word = op0;
1933 /* Extract the parts in bit-counting order,
1934 whose meaning is determined by BYTES_PER_UNIT.
1935 OFFSET is in UNITs, and UNIT is in bits.
1936 extract_fixed_bit_field wants offset in bytes. */
1937 part = extract_fixed_bit_field (word_mode, word,
1938 offset * unit / BITS_PER_UNIT,
1939 thissize, thispos, 0, 1);
1940 bitsdone += thissize;
1942 /* Shift this part into place for the result. */
1943 if (BYTES_BIG_ENDIAN)
1945 if (bitsize != bitsdone)
1946 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1947 build_int_cst (NULL_TREE, bitsize - bitsdone),
1948 0, 1);
1950 else
1952 if (bitsdone != thissize)
1953 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1954 build_int_cst (NULL_TREE,
1955 bitsdone - thissize), 0, 1);
1958 if (first)
1959 result = part;
1960 else
1961 /* Combine the parts with bitwise or. This works
1962 because we extracted each part as an unsigned bit field. */
1963 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1964 OPTAB_LIB_WIDEN);
1966 first = 0;
1969 /* Unsigned bit field: we are done. */
1970 if (unsignedp)
1971 return result;
1972 /* Signed bit field: sign-extend with two arithmetic shifts. */
1973 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1974 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
1975 NULL_RTX, 0);
1976 return expand_shift (RSHIFT_EXPR, word_mode, result,
1977 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
1978 NULL_RTX, 0);
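/* Illustrative host-side sketch (not part of the original file): how a
   field that straddles two 32-bit little-endian words is reassembled,
   mirroring the loop above.  Assumes 0 < bitpos < 32, bitpos + bitsize > 32
   and bitsize <= 32; all names here are hypothetical.  */
static unsigned int
example_extract_split_32 (unsigned int word0, unsigned int word1,
			  unsigned int bitpos, unsigned int bitsize)
{
  unsigned int part0_size = 32 - bitpos;	/* Bits taken from WORD0.  */
  unsigned int part0 = word0 >> bitpos;		/* First part, already at bit 0.  */
  unsigned int part1 = word1 & ((1u << (bitsize - part0_size)) - 1);
  /* Combine with inclusive or; each part was extracted zero-extended.  */
  return part0 | (part1 << part0_size);
}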
1981 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
1982 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
1983 MODE, fill the upper bits with zeros. Fail if the layout of either
1984 mode is unknown (as for CC modes) or if the extraction would involve
1985 unprofitable mode punning. Return the value on success, otherwise
1986 return null.
1988 This is different from gen_lowpart* in these respects:
1990 - the returned value must always be considered an rvalue
1992 - when MODE is wider than SRC_MODE, the extraction involves
1993 a zero extension
1995 - when MODE is smaller than SRC_MODE, the extraction involves
1996 a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).
1998 In other words, this routine performs a computation, whereas the
1999 gen_lowpart* routines are conceptually lvalue or rvalue subreg
2000 operations. */
2003 extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
2005 enum machine_mode int_mode, src_int_mode;
2007 if (mode == src_mode)
2008 return src;
2010 if (CONSTANT_P (src))
2012 /* simplify_gen_subreg can't be used here, because if simplify_subreg
2013 fails, it will happily create (subreg (symbol_ref)) or similar
2014 invalid SUBREGs. */
2015 unsigned int byte = subreg_lowpart_offset (mode, src_mode);
2016 rtx ret = simplify_subreg (mode, src, src_mode, byte);
2017 if (ret)
2018 return ret;
2020 if (GET_MODE (src) == VOIDmode
2021 || !validate_subreg (mode, src_mode, src, byte))
2022 return NULL_RTX;
2024 src = force_reg (GET_MODE (src), src);
2025 return gen_rtx_SUBREG (mode, src, byte);
2028 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
2029 return NULL_RTX;
2031 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
2032 && MODES_TIEABLE_P (mode, src_mode))
2034 rtx x = gen_lowpart_common (mode, src);
2035 if (x)
2036 return x;
2039 src_int_mode = int_mode_for_mode (src_mode);
2040 int_mode = int_mode_for_mode (mode);
2041 if (src_int_mode == BLKmode || int_mode == BLKmode)
2042 return NULL_RTX;
2044 if (!MODES_TIEABLE_P (src_int_mode, src_mode))
2045 return NULL_RTX;
2046 if (!MODES_TIEABLE_P (int_mode, mode))
2047 return NULL_RTX;
2049 src = gen_lowpart (src_int_mode, src);
2050 src = convert_modes (int_mode, src_int_mode, src, true);
2051 src = gen_lowpart (mode, src);
2052 return src;
2055 /* Add INC into TARGET. */
2057 void
2058 expand_inc (rtx target, rtx inc)
2060 rtx value = expand_binop (GET_MODE (target), add_optab,
2061 target, inc,
2062 target, 0, OPTAB_LIB_WIDEN);
2063 if (value != target)
2064 emit_move_insn (target, value);
2067 /* Subtract DEC from TARGET. */
2069 void
2070 expand_dec (rtx target, rtx dec)
2072 rtx value = expand_binop (GET_MODE (target), sub_optab,
2073 target, dec,
2074 target, 0, OPTAB_LIB_WIDEN);
2075 if (value != target)
2076 emit_move_insn (target, value);
2079 /* Output a shift instruction for expression code CODE,
2080 with SHIFTED being the rtx for the value to shift,
2081 and AMOUNT the tree for the amount to shift by.
2082 Store the result in the rtx TARGET, if that is convenient.
2083 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2084 Return the rtx for where the value is. */
2087 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2088 tree amount, rtx target, int unsignedp)
2090 rtx op1, temp = 0;
2091 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2092 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2093 optab lshift_optab = ashl_optab;
2094 optab rshift_arith_optab = ashr_optab;
2095 optab rshift_uns_optab = lshr_optab;
2096 optab lrotate_optab = rotl_optab;
2097 optab rrotate_optab = rotr_optab;
2098 enum machine_mode op1_mode;
2099 int attempt;
2100 bool speed = optimize_insn_for_speed_p ();
2102 op1 = expand_normal (amount);
2103 op1_mode = GET_MODE (op1);
2105 /* Determine whether the shift/rotate amount is a vector, or scalar. If the
2106 shift amount is a vector, use the vector/vector shift patterns. */
2107 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2109 lshift_optab = vashl_optab;
2110 rshift_arith_optab = vashr_optab;
2111 rshift_uns_optab = vlshr_optab;
2112 lrotate_optab = vrotl_optab;
2113 rrotate_optab = vrotr_optab;
2116 /* We used to detect shift counts computed by NEGATE_EXPR
2117 and shift in the other direction, but that does not work
2118 on all machines.
2120 if (SHIFT_COUNT_TRUNCATED)
2122 if (GET_CODE (op1) == CONST_INT
2123 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2124 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2125 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2126 % GET_MODE_BITSIZE (mode));
2127 else if (GET_CODE (op1) == SUBREG
2128 && subreg_lowpart_p (op1))
2129 op1 = SUBREG_REG (op1);
2132 if (op1 == const0_rtx)
2133 return shifted;
2135 /* Check whether it's cheaper to implement a left shift by a constant
2136 bit count by a sequence of additions. */
2137 if (code == LSHIFT_EXPR
2138 && GET_CODE (op1) == CONST_INT
2139 && INTVAL (op1) > 0
2140 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2141 && INTVAL (op1) < MAX_BITS_PER_WORD
2142 && shift_cost[speed][mode][INTVAL (op1)] > INTVAL (op1) * add_cost[speed][mode]
2143 && shift_cost[speed][mode][INTVAL (op1)] != MAX_COST)
2145 int i;
2146 for (i = 0; i < INTVAL (op1); i++)
2148 temp = force_reg (mode, shifted);
2149 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2150 unsignedp, OPTAB_LIB_WIDEN);
2152 return shifted;
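  /* Illustrative note (not part of the original file): the loop above
     implements SHIFTED << N purely with additions, since each addition
     doubles the value; e.g. for N == 3 it emits x2 = x + x,
     x4 = x2 + x2, x8 = x4 + x4.  */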
2155 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2157 enum optab_methods methods;
2159 if (attempt == 0)
2160 methods = OPTAB_DIRECT;
2161 else if (attempt == 1)
2162 methods = OPTAB_WIDEN;
2163 else
2164 methods = OPTAB_LIB_WIDEN;
2166 if (rotate)
2168 /* Widening does not work for rotation. */
2169 if (methods == OPTAB_WIDEN)
2170 continue;
2171 else if (methods == OPTAB_LIB_WIDEN)
2173 /* If we have been unable to open-code this by a rotation,
2174 do it as the IOR of two shifts. I.e., to rotate A
2175 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2176 where C is the bitsize of A.
2178 It is theoretically possible that the target machine might
2179 not be able to perform either shift and hence we would
2180 be making two libcalls rather than just the one for the
2181 shift (similarly if IOR could not be done). We will allow
2182 this extremely unlikely lossage to avoid complicating the
2183 code below. */
2185 rtx subtarget = target == shifted ? 0 : target;
2186 tree new_amount, other_amount;
2187 rtx temp1;
2188 tree type = TREE_TYPE (amount);
2189 if (GET_MODE (op1) != TYPE_MODE (type)
2190 && GET_MODE (op1) != VOIDmode)
2191 op1 = convert_to_mode (TYPE_MODE (type), op1, 1);
2192 new_amount = make_tree (type, op1);
2193 other_amount
2194 = fold_build2 (MINUS_EXPR, type,
2195 build_int_cst (type, GET_MODE_BITSIZE (mode)),
2196 new_amount);
2198 shifted = force_reg (mode, shifted);
2200 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2201 mode, shifted, new_amount, 0, 1);
2202 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2203 mode, shifted, other_amount, subtarget, 1);
2204 return expand_binop (mode, ior_optab, temp, temp1, target,
2205 unsignedp, methods);
2208 temp = expand_binop (mode,
2209 left ? lrotate_optab : rrotate_optab,
2210 shifted, op1, target, unsignedp, methods);
2212 else if (unsignedp)
2213 temp = expand_binop (mode,
2214 left ? lshift_optab : rshift_uns_optab,
2215 shifted, op1, target, unsignedp, methods);
2217 /* Do arithmetic shifts.
2218 Also, if we are going to widen the operand, we can just as well
2219 use an arithmetic right-shift instead of a logical one. */
2220 if (temp == 0 && ! rotate
2221 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2223 enum optab_methods methods1 = methods;
2225 /* If trying to widen a log shift to an arithmetic shift,
2226 don't accept an arithmetic shift of the same size. */
2227 if (unsignedp)
2228 methods1 = OPTAB_MUST_WIDEN;
2230 /* Arithmetic shift */
2232 temp = expand_binop (mode,
2233 left ? lshift_optab : rshift_arith_optab,
2234 shifted, op1, target, unsignedp, methods1);
2237 /* We used to try extzv here for logical right shifts, but that was
2238 only useful for one machine, the VAX, and caused poor code
2239 generation there for lshrdi3, so the code was deleted and a
2240 define_expand for lshrsi3 was added to vax.md. */
2243 gcc_assert (temp);
2244 return temp;
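/* Illustrative host-side sketch (not part of the original file): the
   IOR-of-two-shifts rotate fallback used above, specialised to a 32-bit
   unsigned int with 0 < n < 32.  */
static unsigned int
example_rotate_left_32 (unsigned int a, unsigned int n)
{
  /* (A << N) | ((unsigned) A >> (C - N)) with C == 32.  */
  return (a << n) | (a >> (32 - n));
}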
2247 enum alg_code {
2248 alg_unknown,
2249 alg_zero,
2250 alg_m, alg_shift,
2251 alg_add_t_m2,
2252 alg_sub_t_m2,
2253 alg_add_factor,
2254 alg_sub_factor,
2255 alg_add_t2_m,
2256 alg_sub_t2_m,
2257 alg_impossible
2260 /* This structure holds the "cost" of a multiply sequence. The
2261 "cost" field holds the total rtx_cost of every operator in the
2262 synthetic multiplication sequence, hence cost(a op b) is defined
2263 as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
2264 The "latency" field holds the minimum possible latency of the
2265 synthetic multiply, on a hypothetical infinitely parallel CPU.
2266 This is the critical path, or the maximum height, of the expression
2267 tree which is the sum of rtx_costs on the most expensive path from
2268 any leaf to the root. Hence latency(a op b) is defined as zero for
2269 leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise. */
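/* Worked example (illustrative, not in the original source): for a
   sequence computing (a + b) + (c + d), the cost is three times the
   rtx_cost of an addition, but the latency is only two, because the two
   inner additions are independent and could issue in parallel.  */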
2271 struct mult_cost {
2272 short cost; /* Total rtx_cost of the multiplication sequence. */
2273 short latency; /* The latency of the multiplication sequence. */
2276 /* This macro is used to compare a pointer to a mult_cost against a
2277 single integer "rtx_cost" value. This is equivalent to the macro
2278 CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}. */
2279 #define MULT_COST_LESS(X,Y) ((X)->cost < (Y) \
2280 || ((X)->cost == (Y) && (X)->latency < (Y)))
2282 /* This macro is used to compare two pointers to mult_costs against
2283 each other. The macro returns true if X is cheaper than Y.
2284 Currently, the cheaper of two mult_costs is the one with the
2285 lower "cost". If "cost"s are tied, the lower latency is cheaper. */
2286 #define CHEAPER_MULT_COST(X,Y) ((X)->cost < (Y)->cost \
2287 || ((X)->cost == (Y)->cost \
2288 && (X)->latency < (Y)->latency))
2290 /* This structure records a sequence of operations.
2291 `ops' is the number of operations recorded.
2292 `cost' is their total cost.
2293 The operations are stored in `op' and the corresponding
2294 logarithms of the integer coefficients in `log'.
2296 These are the operations:
2297 alg_zero total := 0;
2298 alg_m total := multiplicand;
2299 alg_shift total := total * coeff
2300 alg_add_t_m2 total := total + multiplicand * coeff;
2301 alg_sub_t_m2 total := total - multiplicand * coeff;
2302 alg_add_factor total := total * coeff + total;
2303 alg_sub_factor total := total * coeff - total;
2304 alg_add_t2_m total := total * coeff + multiplicand;
2305 alg_sub_t2_m total := total * coeff - multiplicand;
2307 The first operand must be either alg_zero or alg_m. */
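/* Worked example (illustrative, not in the original source): multiplying
   by 10 can be recorded as

       alg_m         total := multiplicand             (log 0)
       alg_add_t2_m  total := total*4 + multiplicand   (log 2, now 5*x)
       alg_shift     total := total*2                  (log 1, now 10*x)

   i.e. one shift-and-add step followed by one shift, instead of a multiply.  */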
2309 struct algorithm
2311 struct mult_cost cost;
2312 short ops;
2313 /* The size of the OP and LOG fields are not directly related to the
2314 word size, but the worst-case algorithms will be if we have few
2315 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2316 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2317 in total wordsize operations. */
2318 enum alg_code op[MAX_BITS_PER_WORD];
2319 char log[MAX_BITS_PER_WORD];
2322 /* The entry for our multiplication cache/hash table. */
2323 struct alg_hash_entry {
2324 /* The number we are multiplying by. */
2325 unsigned HOST_WIDE_INT t;
2327 /* The mode in which we are multiplying something by T. */
2328 enum machine_mode mode;
2330 /* The best multiplication algorithm for t. */
2331 enum alg_code alg;
2333 /* The cost of multiplication if ALG_CODE is not alg_impossible.
2334 Otherwise, the cost within which multiplication by T is
2335 impossible. */
2336 struct mult_cost cost;
2338 /* Optimized for speed? */
2339 bool speed;
2342 /* The number of cache/hash entries. */
2343 #if HOST_BITS_PER_WIDE_INT == 64
2344 #define NUM_ALG_HASH_ENTRIES 1031
2345 #else
2346 #define NUM_ALG_HASH_ENTRIES 307
2347 #endif
2349 /* Each entry of ALG_HASH caches alg_code for some integer. This is
2350 actually a hash table. If we have a collision, the older
2351 entry is kicked out. */
2352 static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];
2354 /* Indicates the type of fixup needed after a constant multiplication.
2355 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2356 the result should be negated, and ADD_VARIANT means that the
2357 multiplicand should be added to the result. */
2358 enum mult_variant {basic_variant, negate_variant, add_variant};
2360 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2361 const struct mult_cost *, enum machine_mode mode);
2362 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2363 struct algorithm *, enum mult_variant *, int);
2364 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2365 const struct algorithm *, enum mult_variant);
2366 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2367 int, rtx *, int *, int *);
2368 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2369 static rtx extract_high_half (enum machine_mode, rtx);
2370 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2371 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2372 int, int);
2373 /* Compute and return the best algorithm for multiplying by T.
2374 The algorithm must cost less than COST_LIMIT.
2375 If retval.cost >= COST_LIMIT, no algorithm was found and all
2376 other fields of the returned struct are undefined.
2377 MODE is the machine mode of the multiplication. */
2379 static void
2380 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2381 const struct mult_cost *cost_limit, enum machine_mode mode)
2383 int m;
2384 struct algorithm *alg_in, *best_alg;
2385 struct mult_cost best_cost;
2386 struct mult_cost new_limit;
2387 int op_cost, op_latency;
2388 unsigned HOST_WIDE_INT q;
2389 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2390 int hash_index;
2391 bool cache_hit = false;
2392 enum alg_code cache_alg = alg_zero;
2393 bool speed = optimize_insn_for_speed_p ();
2395 /* Indicate that no algorithm is yet found. If no algorithm
2396 is found, this value will be returned and indicate failure. */
2397 alg_out->cost.cost = cost_limit->cost + 1;
2398 alg_out->cost.latency = cost_limit->latency + 1;
2400 if (cost_limit->cost < 0
2401 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2402 return;
2404 /* Restrict the bits of "t" to the multiplication's mode. */
2405 t &= GET_MODE_MASK (mode);
2407 /* t == 1 can be done in zero cost. */
2408 if (t == 1)
2410 alg_out->ops = 1;
2411 alg_out->cost.cost = 0;
2412 alg_out->cost.latency = 0;
2413 alg_out->op[0] = alg_m;
2414 return;
2417 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2418 fail now. */
2419 if (t == 0)
2421 if (MULT_COST_LESS (cost_limit, zero_cost[speed]))
2422 return;
2423 else
2425 alg_out->ops = 1;
2426 alg_out->cost.cost = zero_cost[speed];
2427 alg_out->cost.latency = zero_cost[speed];
2428 alg_out->op[0] = alg_zero;
2429 return;
2433 /* We'll be needing a couple extra algorithm structures now. */
2435 alg_in = XALLOCA (struct algorithm);
2436 best_alg = XALLOCA (struct algorithm);
2437 best_cost = *cost_limit;
2439 /* Compute the hash index. */
2440 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2442 /* See if we already know what to do for T. */
2443 if (alg_hash[hash_index].t == t
2444 && alg_hash[hash_index].mode == mode
2446 && alg_hash[hash_index].speed == speed
2447 && alg_hash[hash_index].alg != alg_unknown)
2449 cache_alg = alg_hash[hash_index].alg;
2451 if (cache_alg == alg_impossible)
2453 /* The cache tells us that it's impossible to synthesize
2454 multiplication by T within alg_hash[hash_index].cost. */
2455 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2456 /* COST_LIMIT is at least as restrictive as the one
2457 recorded in the hash table, in which case we have no
2458 hope of synthesizing a multiplication. Just
2459 return. */
2460 return;
2462 /* If we get here, COST_LIMIT is less restrictive than the
2463 one recorded in the hash table, so we may be able to
2464 synthesize a multiplication. Proceed as if we didn't
2465 have the cache entry. */
2467 else
2469 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2470 /* The cached algorithm shows that this multiplication
2471 requires more cost than COST_LIMIT. Just return. This
2472 way, we don't clobber this cache entry with
2473 alg_impossible but retain useful information. */
2474 return;
2476 cache_hit = true;
2478 switch (cache_alg)
2480 case alg_shift:
2481 goto do_alg_shift;
2483 case alg_add_t_m2:
2484 case alg_sub_t_m2:
2485 goto do_alg_addsub_t_m2;
2487 case alg_add_factor:
2488 case alg_sub_factor:
2489 goto do_alg_addsub_factor;
2491 case alg_add_t2_m:
2492 goto do_alg_add_t2_m;
2494 case alg_sub_t2_m:
2495 goto do_alg_sub_t2_m;
2497 default:
2498 gcc_unreachable ();
2503 /* If we have a group of zero bits at the low-order part of T, try
2504 multiplying by the remaining bits and then doing a shift. */
2506 if ((t & 1) == 0)
2508 do_alg_shift:
2509 m = floor_log2 (t & -t); /* m = number of low zero bits */
2510 if (m < maxm)
2512 q = t >> m;
2513 /* The function expand_shift will choose between a shift and
2514 a sequence of additions, so the observed cost is given as
2515 MIN (m * add_cost[speed][mode], shift_cost[speed][mode][m]). */
2516 op_cost = m * add_cost[speed][mode];
2517 if (shift_cost[speed][mode][m] < op_cost)
2518 op_cost = shift_cost[speed][mode][m];
2519 new_limit.cost = best_cost.cost - op_cost;
2520 new_limit.latency = best_cost.latency - op_cost;
2521 synth_mult (alg_in, q, &new_limit, mode);
2523 alg_in->cost.cost += op_cost;
2524 alg_in->cost.latency += op_cost;
2525 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2527 struct algorithm *x;
2528 best_cost = alg_in->cost;
2529 x = alg_in, alg_in = best_alg, best_alg = x;
2530 best_alg->log[best_alg->ops] = m;
2531 best_alg->op[best_alg->ops] = alg_shift;
2534 if (cache_hit)
2535 goto done;
2538 /* If we have an odd number, add or subtract one. */
2539 if ((t & 1) != 0)
2541 unsigned HOST_WIDE_INT w;
2543 do_alg_addsub_t_m2:
2544 for (w = 1; (w & t) != 0; w <<= 1)
2546 /* If T was -1, then W will be zero after the loop. This is another
2547 case where T ends with ...111. Handling this with (T + 1) and
2548 subtracting 1 produces slightly better code and makes algorithm
2549 selection much faster than treating it like the ...0111 case
2550 below.
2551 if (w == 0
2552 || (w > 2
2553 /* Reject the case where t is 3.
2554 Thus we prefer addition in that case. */
2555 && t != 3))
2557 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2559 op_cost = add_cost[speed][mode];
2560 new_limit.cost = best_cost.cost - op_cost;
2561 new_limit.latency = best_cost.latency - op_cost;
2562 synth_mult (alg_in, t + 1, &new_limit, mode);
2564 alg_in->cost.cost += op_cost;
2565 alg_in->cost.latency += op_cost;
2566 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2568 struct algorithm *x;
2569 best_cost = alg_in->cost;
2570 x = alg_in, alg_in = best_alg, best_alg = x;
2571 best_alg->log[best_alg->ops] = 0;
2572 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2575 else
2577 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2579 op_cost = add_cost[speed][mode];
2580 new_limit.cost = best_cost.cost - op_cost;
2581 new_limit.latency = best_cost.latency - op_cost;
2582 synth_mult (alg_in, t - 1, &new_limit, mode);
2584 alg_in->cost.cost += op_cost;
2585 alg_in->cost.latency += op_cost;
2586 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2588 struct algorithm *x;
2589 best_cost = alg_in->cost;
2590 x = alg_in, alg_in = best_alg, best_alg = x;
2591 best_alg->log[best_alg->ops] = 0;
2592 best_alg->op[best_alg->ops] = alg_add_t_m2;
2595 if (cache_hit)
2596 goto done;
2599 /* Look for factors of t of the form
2600 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2601 If we find such a factor, we can multiply by t using an algorithm that
2602 multiplies by q, shift the result by m and add/subtract it to itself.
2604 We search for large factors first and loop down, even if large factors
2605 are less probable than small; if we find a large factor we will find a
2606 good sequence quickly, and therefore be able to prune (by decreasing
2607 COST_LIMIT) the search. */
2609 do_alg_addsub_factor:
2610 for (m = floor_log2 (t - 1); m >= 2; m--)
2612 unsigned HOST_WIDE_INT d;
2614 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2615 if (t % d == 0 && t > d && m < maxm
2616 && (!cache_hit || cache_alg == alg_add_factor))
2618 /* If the target has a cheap shift-and-add instruction use
2619 that in preference to a shift insn followed by an add insn.
2620 Assume that the shift-and-add is "atomic" with a latency
2621 equal to its cost, otherwise assume that on superscalar
2622 hardware the shift may be executed concurrently with the
2623 earlier steps in the algorithm. */
2624 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2625 if (shiftadd_cost[speed][mode][m] < op_cost)
2627 op_cost = shiftadd_cost[speed][mode][m];
2628 op_latency = op_cost;
2630 else
2631 op_latency = add_cost[speed][mode];
2633 new_limit.cost = best_cost.cost - op_cost;
2634 new_limit.latency = best_cost.latency - op_latency;
2635 synth_mult (alg_in, t / d, &new_limit, mode);
2637 alg_in->cost.cost += op_cost;
2638 alg_in->cost.latency += op_latency;
2639 if (alg_in->cost.latency < op_cost)
2640 alg_in->cost.latency = op_cost;
2641 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2643 struct algorithm *x;
2644 best_cost = alg_in->cost;
2645 x = alg_in, alg_in = best_alg, best_alg = x;
2646 best_alg->log[best_alg->ops] = m;
2647 best_alg->op[best_alg->ops] = alg_add_factor;
2649 /* Other factors will have been taken care of in the recursion. */
2650 break;
2653 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2654 if (t % d == 0 && t > d && m < maxm
2655 && (!cache_hit || cache_alg == alg_sub_factor))
2657 /* If the target has a cheap shift-and-subtract insn use
2658 that in preference to a shift insn followed by a sub insn.
2659 Assume that the shift-and-sub is "atomic" with a latency
2660 equal to its cost, otherwise assume that on superscalar
2661 hardware the shift may be executed concurrently with the
2662 earlier steps in the algorithm. */
2663 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2664 if (shiftsub_cost[speed][mode][m] < op_cost)
2666 op_cost = shiftsub_cost[speed][mode][m];
2667 op_latency = op_cost;
2669 else
2670 op_latency = add_cost[speed][mode];
2672 new_limit.cost = best_cost.cost - op_cost;
2673 new_limit.latency = best_cost.latency - op_latency;
2674 synth_mult (alg_in, t / d, &new_limit, mode);
2676 alg_in->cost.cost += op_cost;
2677 alg_in->cost.latency += op_latency;
2678 if (alg_in->cost.latency < op_cost)
2679 alg_in->cost.latency = op_cost;
2680 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2682 struct algorithm *x;
2683 best_cost = alg_in->cost;
2684 x = alg_in, alg_in = best_alg, best_alg = x;
2685 best_alg->log[best_alg->ops] = m;
2686 best_alg->op[best_alg->ops] = alg_sub_factor;
2688 break;
2691 if (cache_hit)
2692 goto done;
2694 /* Try shift-and-add (load effective address) instructions,
2695 i.e. do a*3, a*5, a*9. */
2696 if ((t & 1) != 0)
2698 do_alg_add_t2_m:
2699 q = t - 1;
2700 q = q & -q;
2701 m = exact_log2 (q);
2702 if (m >= 0 && m < maxm)
2704 op_cost = shiftadd_cost[speed][mode][m];
2705 new_limit.cost = best_cost.cost - op_cost;
2706 new_limit.latency = best_cost.latency - op_cost;
2707 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2709 alg_in->cost.cost += op_cost;
2710 alg_in->cost.latency += op_cost;
2711 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2713 struct algorithm *x;
2714 best_cost = alg_in->cost;
2715 x = alg_in, alg_in = best_alg, best_alg = x;
2716 best_alg->log[best_alg->ops] = m;
2717 best_alg->op[best_alg->ops] = alg_add_t2_m;
2720 if (cache_hit)
2721 goto done;
2723 do_alg_sub_t2_m:
2724 q = t + 1;
2725 q = q & -q;
2726 m = exact_log2 (q);
2727 if (m >= 0 && m < maxm)
2729 op_cost = shiftsub_cost[speed][mode][m];
2730 new_limit.cost = best_cost.cost - op_cost;
2731 new_limit.latency = best_cost.latency - op_cost;
2732 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2734 alg_in->cost.cost += op_cost;
2735 alg_in->cost.latency += op_cost;
2736 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2738 struct algorithm *x;
2739 best_cost = alg_in->cost;
2740 x = alg_in, alg_in = best_alg, best_alg = x;
2741 best_alg->log[best_alg->ops] = m;
2742 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2745 if (cache_hit)
2746 goto done;
2749 done:
2750 /* If best_cost has not decreased, we have not found any algorithm. */
2751 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2753 /* We failed to find an algorithm. Record alg_impossible for
2754 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2755 we are asked to find an algorithm for T within the same or
2756 lower COST_LIMIT, we can immediately return to the
2757 caller. */
2758 alg_hash[hash_index].t = t;
2759 alg_hash[hash_index].mode = mode;
2760 alg_hash[hash_index].speed = speed;
2761 alg_hash[hash_index].alg = alg_impossible;
2762 alg_hash[hash_index].cost = *cost_limit;
2763 return;
2766 /* Cache the result. */
2767 if (!cache_hit)
2769 alg_hash[hash_index].t = t;
2770 alg_hash[hash_index].mode = mode;
2771 alg_hash[hash_index].speed = speed;
2772 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2773 alg_hash[hash_index].cost.cost = best_cost.cost;
2774 alg_hash[hash_index].cost.latency = best_cost.latency;
2777 /* If we are getting too long a sequence for `struct algorithm'
2778 to record, make this search fail. */
2779 if (best_alg->ops == MAX_BITS_PER_WORD)
2780 return;
2782 /* Copy the algorithm from temporary space to the space at alg_out.
2783 We avoid using structure assignment because the majority of
2784 best_alg is normally undefined, and this is a critical function. */
2785 alg_out->ops = best_alg->ops + 1;
2786 alg_out->cost = best_cost;
2787 memcpy (alg_out->op, best_alg->op,
2788 alg_out->ops * sizeof *alg_out->op);
2789 memcpy (alg_out->log, best_alg->log,
2790 alg_out->ops * sizeof *alg_out->log);
2793 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2794 Try three variations:
2796 - a shift/add sequence based on VAL itself
2797 - a shift/add sequence based on -VAL, followed by a negation
2798 - a shift/add sequence based on VAL - 1, followed by an addition.
2800 Return true if the cheapest of these cost less than MULT_COST,
2801 describing the algorithm in *ALG and final fixup in *VARIANT. */
2803 static bool
2804 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2805 struct algorithm *alg, enum mult_variant *variant,
2806 int mult_cost)
2808 struct algorithm alg2;
2809 struct mult_cost limit;
2810 int op_cost;
2811 bool speed = optimize_insn_for_speed_p ();
2813 /* Fail quickly for impossible bounds. */
2814 if (mult_cost < 0)
2815 return false;
2817 /* Ensure that mult_cost provides a reasonable upper bound.
2818 Any constant multiplication can be performed with less
2819 than 2 * bits additions. */
2820 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[speed][mode];
2821 if (mult_cost > op_cost)
2822 mult_cost = op_cost;
2824 *variant = basic_variant;
2825 limit.cost = mult_cost;
2826 limit.latency = mult_cost;
2827 synth_mult (alg, val, &limit, mode);
2829 /* This works only if the inverted value actually fits in an
2830 `unsigned int'. */
2831 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2833 op_cost = neg_cost[speed][mode];
2834 if (MULT_COST_LESS (&alg->cost, mult_cost))
2836 limit.cost = alg->cost.cost - op_cost;
2837 limit.latency = alg->cost.latency - op_cost;
2839 else
2841 limit.cost = mult_cost - op_cost;
2842 limit.latency = mult_cost - op_cost;
2845 synth_mult (&alg2, -val, &limit, mode);
2846 alg2.cost.cost += op_cost;
2847 alg2.cost.latency += op_cost;
2848 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2849 *alg = alg2, *variant = negate_variant;
2852 /* This proves very useful for division-by-constant. */
2853 op_cost = add_cost[speed][mode];
2854 if (MULT_COST_LESS (&alg->cost, mult_cost))
2856 limit.cost = alg->cost.cost - op_cost;
2857 limit.latency = alg->cost.latency - op_cost;
2859 else
2861 limit.cost = mult_cost - op_cost;
2862 limit.latency = mult_cost - op_cost;
2865 synth_mult (&alg2, val - 1, &limit, mode);
2866 alg2.cost.cost += op_cost;
2867 alg2.cost.latency += op_cost;
2868 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2869 *alg = alg2, *variant = add_variant;
2871 return MULT_COST_LESS (&alg->cost, mult_cost);
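/* Worked example (illustrative, not in the original source): for VAL == 7
   the three candidates are a sequence for 7 itself (e.g. (x << 3) - x), a
   sequence for -7 followed by a negation, and a sequence for 6 followed by
   one extra addition of the multiplicand; whichever is cheapest under
   MULT_COST is returned in *ALG and *VARIANT.  */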
2874 /* A subroutine of expand_mult, used for constant multiplications.
2875 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2876 convenient. Use the shift/add sequence described by ALG and apply
2877 the final fixup specified by VARIANT. */
2879 static rtx
2880 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2881 rtx target, const struct algorithm *alg,
2882 enum mult_variant variant)
2884 HOST_WIDE_INT val_so_far;
2885 rtx insn, accum, tem;
2886 int opno;
2887 enum machine_mode nmode;
2889 /* Avoid referencing memory over and over and invalid sharing
2890 on SUBREGs. */
2891 op0 = force_reg (mode, op0);
2893 /* ACCUM starts out either as OP0 or as a zero, depending on
2894 the first operation. */
2896 if (alg->op[0] == alg_zero)
2898 accum = copy_to_mode_reg (mode, const0_rtx);
2899 val_so_far = 0;
2901 else if (alg->op[0] == alg_m)
2903 accum = copy_to_mode_reg (mode, op0);
2904 val_so_far = 1;
2906 else
2907 gcc_unreachable ();
2909 for (opno = 1; opno < alg->ops; opno++)
2911 int log = alg->log[opno];
2912 rtx shift_subtarget = optimize ? 0 : accum;
2913 rtx add_target
2914 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2915 && !optimize)
2916 ? target : 0;
2917 rtx accum_target = optimize ? 0 : accum;
2919 switch (alg->op[opno])
2921 case alg_shift:
2922 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2923 build_int_cst (NULL_TREE, log),
2924 NULL_RTX, 0);
2925 val_so_far <<= log;
2926 break;
2928 case alg_add_t_m2:
2929 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2930 build_int_cst (NULL_TREE, log),
2931 NULL_RTX, 0);
2932 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2933 add_target ? add_target : accum_target);
2934 val_so_far += (HOST_WIDE_INT) 1 << log;
2935 break;
2937 case alg_sub_t_m2:
2938 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2939 build_int_cst (NULL_TREE, log),
2940 NULL_RTX, 0);
2941 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2942 add_target ? add_target : accum_target);
2943 val_so_far -= (HOST_WIDE_INT) 1 << log;
2944 break;
2946 case alg_add_t2_m:
2947 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2948 build_int_cst (NULL_TREE, log),
2949 shift_subtarget,
2951 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2952 add_target ? add_target : accum_target);
2953 val_so_far = (val_so_far << log) + 1;
2954 break;
2956 case alg_sub_t2_m:
2957 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2958 build_int_cst (NULL_TREE, log),
2959 shift_subtarget, 0);
2960 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2961 add_target ? add_target : accum_target);
2962 val_so_far = (val_so_far << log) - 1;
2963 break;
2965 case alg_add_factor:
2966 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2967 build_int_cst (NULL_TREE, log),
2968 NULL_RTX, 0);
2969 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2970 add_target ? add_target : accum_target);
2971 val_so_far += val_so_far << log;
2972 break;
2974 case alg_sub_factor:
2975 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2976 build_int_cst (NULL_TREE, log),
2977 NULL_RTX, 0);
2978 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2979 (add_target
2980 ? add_target : (optimize ? 0 : tem)));
2981 val_so_far = (val_so_far << log) - val_so_far;
2982 break;
2984 default:
2985 gcc_unreachable ();
2988 /* Write a REG_EQUAL note on the last insn so that we can cse
2989 multiplication sequences. Note that if ACCUM is a SUBREG,
2990 we've set the inner register and must properly indicate
2991 that. */
2993 tem = op0, nmode = mode;
2994 if (GET_CODE (accum) == SUBREG)
2996 nmode = GET_MODE (SUBREG_REG (accum));
2997 tem = gen_lowpart (nmode, op0);
3000 insn = get_last_insn ();
3001 set_unique_reg_note (insn, REG_EQUAL,
3002 gen_rtx_MULT (nmode, tem,
3003 GEN_INT (val_so_far)));
3006 if (variant == negate_variant)
3008 val_so_far = -val_so_far;
3009 accum = expand_unop (mode, neg_optab, accum, target, 0);
3011 else if (variant == add_variant)
3013 val_so_far = val_so_far + 1;
3014 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3017 /* Compare only the bits of val and val_so_far that are significant
3018 in the result mode, to avoid sign-/zero-extension confusion. */
3019 val &= GET_MODE_MASK (mode);
3020 val_so_far &= GET_MODE_MASK (mode);
3021 gcc_assert (val == val_so_far);
3023 return accum;
3026 /* Perform a multiplication and return an rtx for the result.
3027 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3028 TARGET is a suggestion for where to store the result (an rtx).
3030 We check specially for a constant integer as OP1.
3031 If you want this check for OP0 as well, then before calling
3032 you should swap the two operands if OP0 would be constant. */
3035 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3036 int unsignedp)
3038 enum mult_variant variant;
3039 struct algorithm algorithm;
3040 int max_cost;
3041 bool speed = optimize_insn_for_speed_p ();
3043 /* Handling const0_rtx here allows us to use zero as a rogue value for
3044 coeff below. */
3045 if (op1 == const0_rtx)
3046 return const0_rtx;
3047 if (op1 == const1_rtx)
3048 return op0;
3049 if (op1 == constm1_rtx)
3050 return expand_unop (mode,
3051 GET_MODE_CLASS (mode) == MODE_INT
3052 && !unsignedp && flag_trapv
3053 ? negv_optab : neg_optab,
3054 op0, target, 0);
3056 /* These are the operations that are potentially turned into a sequence
3057 of shifts and additions. */
3058 if (SCALAR_INT_MODE_P (mode)
3059 && (unsignedp || !flag_trapv))
3061 HOST_WIDE_INT coeff = 0;
3062 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3064 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3065 less than or equal in size to `unsigned int' this doesn't matter.
3066 If the mode is larger than `unsigned int', then synth_mult works
3067 only if the constant value exactly fits in an `unsigned int' without
3068 any truncation. This means that multiplying by negative values does
3069 not work; results are off by 2^32 on a 32-bit machine.
3071 if (GET_CODE (op1) == CONST_INT)
3073 /* Attempt to handle multiplication of DImode values by negative
3074 coefficients, by performing the multiplication by a positive
3075 multiplier and then inverting the result. */
3076 if (INTVAL (op1) < 0
3077 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3079 /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3080 result is interpreted as an unsigned coefficient.
3081 Exclude cost of op0 from max_cost to match the cost
3082 calculation of the synth_mult. */
3083 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed)
3084 - neg_cost[speed][mode];
3085 if (max_cost > 0
3086 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3087 &variant, max_cost))
3089 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3090 NULL_RTX, &algorithm,
3091 variant);
3092 return expand_unop (mode, neg_optab, temp, target, 0);
3095 else coeff = INTVAL (op1);
3097 else if (GET_CODE (op1) == CONST_DOUBLE)
3099 /* If we are multiplying in DImode, it may still be a win
3100 to try to work with shifts and adds. */
3101 if (CONST_DOUBLE_HIGH (op1) == 0
3102 && CONST_DOUBLE_LOW (op1) > 0)
3103 coeff = CONST_DOUBLE_LOW (op1);
3104 else if (CONST_DOUBLE_LOW (op1) == 0
3105 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3107 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3108 + HOST_BITS_PER_WIDE_INT;
3109 return expand_shift (LSHIFT_EXPR, mode, op0,
3110 build_int_cst (NULL_TREE, shift),
3111 target, unsignedp);
3115 /* We used to test optimize here, on the grounds that it's better to
3116 produce a smaller program when -O is not used. But this causes
3117 such a terrible slowdown sometimes that it seems better to always
3118 use synth_mult. */
3119 if (coeff != 0)
3121 /* Special case powers of two. */
3122 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3123 return expand_shift (LSHIFT_EXPR, mode, op0,
3124 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3125 target, unsignedp);
3127 /* Exclude cost of op0 from max_cost to match the cost
3128 calculation of the synth_mult. */
3129 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed);
3130 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3131 max_cost))
3132 return expand_mult_const (mode, op0, coeff, target,
3133 &algorithm, variant);
3137 if (GET_CODE (op0) == CONST_DOUBLE)
3139 rtx temp = op0;
3140 op0 = op1;
3141 op1 = temp;
3144 /* Expand x*2.0 as x+x. */
3145 if (GET_CODE (op1) == CONST_DOUBLE
3146 && SCALAR_FLOAT_MODE_P (mode))
3148 REAL_VALUE_TYPE d;
3149 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3151 if (REAL_VALUES_EQUAL (d, dconst2))
3153 op0 = force_reg (GET_MODE (op0), op0);
3154 return expand_binop (mode, add_optab, op0, op0,
3155 target, unsignedp, OPTAB_LIB_WIDEN);
3159 /* This used to use umul_optab if unsigned, but for non-widening multiply
3160 there is no difference between signed and unsigned. */
3161 op0 = expand_binop (mode,
3162 ! unsignedp
3163 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3164 ? smulv_optab : smul_optab,
3165 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3166 gcc_assert (op0);
3167 return op0;
3170 /* Return the smallest n such that 2**n >= X. */
3173 ceil_log2 (unsigned HOST_WIDE_INT x)
3175 return floor_log2 (x - 1) + 1;
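/* Worked example (illustrative, not in the original source):
   ceil_log2 (8) == floor_log2 (7) + 1 == 3, while
   ceil_log2 (9) == floor_log2 (8) + 1 == 4.  */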
3178 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3179 replace division by D, and put the least significant N bits of the result
3180 in *MULTIPLIER_PTR and return the most significant bit.
3182 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT); the
3183 needed precision is in PRECISION (should be <= N).
3185 PRECISION should be as small as possible so this function can choose
3186 the multiplier more freely.
3188 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3189 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3191 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3192 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
3194 static
3195 unsigned HOST_WIDE_INT
3196 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3197 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3199 HOST_WIDE_INT mhigh_hi, mlow_hi;
3200 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3201 int lgup, post_shift;
3202 int pow, pow2;
3203 unsigned HOST_WIDE_INT nl, dummy1;
3204 HOST_WIDE_INT nh, dummy2;
3206 /* lgup = ceil(log2(divisor)); */
3207 lgup = ceil_log2 (d);
3209 gcc_assert (lgup <= n);
3211 pow = n + lgup;
3212 pow2 = n + lgup - precision;
3214 /* We could handle this with some effort, but this case is much
3215 better handled directly with a scc insn, so rely on caller using
3216 that. */
3217 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3219 /* mlow = 2^(N + lgup)/d */
3220 if (pow >= HOST_BITS_PER_WIDE_INT)
3222 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3223 nl = 0;
3225 else
3227 nh = 0;
3228 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3230 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3231 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3233 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3234 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3235 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3236 else
3237 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3238 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3239 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3241 gcc_assert (!mhigh_hi || nh - d < d);
3242 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3243 /* Assert that mlow < mhigh. */
3244 gcc_assert (mlow_hi < mhigh_hi
3245 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3247 /* If precision == N, then mlow, mhigh exceed 2^N
3248 (but they do not exceed 2^(N+1)). */
3250 /* Reduce to lowest terms. */
3251 for (post_shift = lgup; post_shift > 0; post_shift--)
3253 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3254 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3255 if (ml_lo >= mh_lo)
3256 break;
3258 mlow_hi = 0;
3259 mlow_lo = ml_lo;
3260 mhigh_hi = 0;
3261 mhigh_lo = mh_lo;
3264 *post_shift_ptr = post_shift;
3265 *lgup_ptr = lgup;
3266 if (n < HOST_BITS_PER_WIDE_INT)
3268 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3269 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3270 return mhigh_lo >= mask;
3272 else
3274 *multiplier_ptr = GEN_INT (mhigh_lo);
3275 return mhigh_hi;
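/* Worked example (illustrative, not in the original source): for d == 5
   with n == precision == 8, lgup == 3 and the initial mhigh is
   (2^11 + 2^3) / 5 == 411; one reduction step halves it to 205 with
   post_shift == 2, and indeed x / 5 == (x * 205) >> (8 + 2) for every
   x < 2^8.  */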
3279 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3280 congruent to 1 (mod 2**N). */
3282 static unsigned HOST_WIDE_INT
3283 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3285 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3287 /* The algorithm notes that the choice y = x satisfies
3288 x*y == 1 mod 2^3, since x is assumed odd.
3289 Each iteration doubles the number of bits of significance in y. */
3291 unsigned HOST_WIDE_INT mask;
3292 unsigned HOST_WIDE_INT y = x;
3293 int nbit = 3;
3295 mask = (n == HOST_BITS_PER_WIDE_INT
3296 ? ~(unsigned HOST_WIDE_INT) 0
3297 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3299 while (nbit < n)
3301 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3302 nbit *= 2;
3304 return y;
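/* Illustrative host-side sketch (not part of the original file) of the
   same Newton-style iteration: each step doubles the number of correct
   low-order bits of the inverse.  Assumes X is odd and 3 <= N <= 32.  */
static unsigned int
example_invert_mod2n_32 (unsigned int x, int n)
{
  unsigned int mask = (n == 32 ? ~0u : (1u << n) - 1);
  unsigned int y = x;		/* Correct to 3 bits, since x is odd.  */
  int bits = 3;
  while (bits < n)
    {
      y = (y * (2 - x * y)) & mask;	/* Correct to twice as many bits.  */
      bits *= 2;
    }
  return y;			/* x * y == 1 (mod 2^n).  */
}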
3307 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3308 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3309 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3310 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3311 become signed.
3313 The result is put in TARGET if that is convenient.
3315 MODE is the mode of operation. */
3318 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3319 rtx op1, rtx target, int unsignedp)
3321 rtx tem;
3322 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3324 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3325 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3326 NULL_RTX, 0);
3327 tem = expand_and (mode, tem, op1, NULL_RTX);
3328 adj_operand
3329 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3330 adj_operand);
3332 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3333 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3334 NULL_RTX, 0);
3335 tem = expand_and (mode, tem, op0, NULL_RTX);
3336 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3337 target);
3339 return target;
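/* Worked example (illustrative, not in the original source): with N-bit
   operands the two high halves differ, modulo 2^N, by
       u_high (a, b) == s_high (a, b) + (a < 0 ? b : 0) + (b < 0 ? a : 0)
   so the code above adds those two correction terms when UNSIGNEDP and
   subtracts them otherwise; each (x >> (N-1)) & y expression computes
   exactly "y if x is negative, else 0".  */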
3342 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3344 static rtx
3345 extract_high_half (enum machine_mode mode, rtx op)
3347 enum machine_mode wider_mode;
3349 if (mode == word_mode)
3350 return gen_highpart (mode, op);
3352 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3354 wider_mode = GET_MODE_WIDER_MODE (mode);
3355 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3356 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
3357 return convert_modes (mode, wider_mode, op, 0);
3360 /* Like expand_mult_highpart, but only consider using a multiplication
3361 optab. OP1 is an rtx for the constant operand. */
3363 static rtx
3364 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3365 rtx target, int unsignedp, int max_cost)
3367 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3368 enum machine_mode wider_mode;
3369 optab moptab;
3370 rtx tem;
3371 int size;
3372 bool speed = optimize_insn_for_speed_p ();
3374 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3376 wider_mode = GET_MODE_WIDER_MODE (mode);
3377 size = GET_MODE_BITSIZE (mode);
3379 /* Firstly, try using a multiplication insn that only generates the needed
3380 high part of the product, and in the sign flavor of unsignedp. */
3381 if (mul_highpart_cost[speed][mode] < max_cost)
3383 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3384 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3385 unsignedp, OPTAB_DIRECT);
3386 if (tem)
3387 return tem;
3390 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3391 Need to adjust the result after the multiplication. */
3392 if (size - 1 < BITS_PER_WORD
3393 && (mul_highpart_cost[speed][mode] + 2 * shift_cost[speed][mode][size-1]
3394 + 4 * add_cost[speed][mode] < max_cost))
3396 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3397 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3398 unsignedp, OPTAB_DIRECT);
3399 if (tem)
3400 /* We used the wrong signedness. Adjust the result. */
3401 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3402 tem, unsignedp);
3405 /* Try widening multiplication. */
3406 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3407 if (optab_handler (moptab, wider_mode)->insn_code != CODE_FOR_nothing
3408 && mul_widen_cost[speed][wider_mode] < max_cost)
3410 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3411 unsignedp, OPTAB_WIDEN);
3412 if (tem)
3413 return extract_high_half (mode, tem);
3416 /* Try widening the mode and perform a non-widening multiplication. */
3417 if (optab_handler (smul_optab, wider_mode)->insn_code != CODE_FOR_nothing
3418 && size - 1 < BITS_PER_WORD
3419 && mul_cost[speed][wider_mode] + shift_cost[speed][mode][size-1] < max_cost)
3421 rtx insns, wop0, wop1;
3423 /* We need to widen the operands, for example to ensure the
3424 constant multiplier is correctly sign or zero extended.
3425 Use a sequence to clean up any instructions emitted by
3426 the conversions if things don't work out. */
3427 start_sequence ();
3428 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3429 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3430 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3431 unsignedp, OPTAB_WIDEN);
3432 insns = get_insns ();
3433 end_sequence ();
3435 if (tem)
3437 emit_insn (insns);
3438 return extract_high_half (mode, tem);
3442 /* Try widening multiplication of opposite signedness, and adjust. */
3443 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3444 if (optab_handler (moptab, wider_mode)->insn_code != CODE_FOR_nothing
3445 && size - 1 < BITS_PER_WORD
3446 && (mul_widen_cost[speed][wider_mode] + 2 * shift_cost[speed][mode][size-1]
3447 + 4 * add_cost[speed][mode] < max_cost))
3449 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3450 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3451 if (tem != 0)
3453 tem = extract_high_half (mode, tem);
3454 /* We used the wrong signedness. Adjust the result. */
3455 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3456 target, unsignedp);
3460 return 0;
3463 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3464 putting the high half of the result in TARGET if that is convenient,
3465 and return where the result is. If the operation cannot be performed,
3466 0 is returned.
3468 MODE is the mode of operation and result.
3470 UNSIGNEDP nonzero means unsigned multiply.
3472 MAX_COST is the total allowed cost for the expanded RTL. */
3474 static rtx
3475 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3476 rtx target, int unsignedp, int max_cost)
3478 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3479 unsigned HOST_WIDE_INT cnst1;
3480 int extra_cost;
3481 bool sign_adjust = false;
3482 enum mult_variant variant;
3483 struct algorithm alg;
3484 rtx tem;
3485 bool speed = optimize_insn_for_speed_p ();
3487 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3488 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3489 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3491 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3493 /* We can't optimize modes wider than BITS_PER_WORD.
3494 ??? We might be able to perform double-word arithmetic if
3495 mode == word_mode, however all the cost calculations in
3496 synth_mult etc. assume single-word operations. */
3497 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3498 return expand_mult_highpart_optab (mode, op0, op1, target,
3499 unsignedp, max_cost);
3501 extra_cost = shift_cost[speed][mode][GET_MODE_BITSIZE (mode) - 1];
3503 /* Check whether we try to multiply by a negative constant. */
3504 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3506 sign_adjust = true;
3507 extra_cost += add_cost[speed][mode];
3510 /* See whether shift/add multiplication is cheap enough. */
3511 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3512 max_cost - extra_cost))
3514 /* See whether the specialized multiplication optabs are
3515 cheaper than the shift/add version. */
3516 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3517 alg.cost.cost + extra_cost);
3518 if (tem)
3519 return tem;
3521 tem = convert_to_mode (wider_mode, op0, unsignedp);
3522 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3523 tem = extract_high_half (mode, tem);
3525 /* Adjust result for signedness. */
3526 if (sign_adjust)
3527 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3529 return tem;
3531 return expand_mult_highpart_optab (mode, op0, op1, target,
3532 unsignedp, max_cost);
3536 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3538 static rtx
3539 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3541 unsigned HOST_WIDE_INT masklow, maskhigh;
3542 rtx result, temp, shift, label;
3543 int logd;
3545 logd = floor_log2 (d);
3546 result = gen_reg_rtx (mode);
3548 /* Avoid conditional branches when they're expensive. */
3549 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3550 && optimize_insn_for_speed_p ())
3552 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3553 mode, 0, -1);
3554 if (signmask)
3556 signmask = force_reg (mode, signmask);
3557 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3558 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3560 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3561 which instruction sequence to use. If logical right shifts
3562 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3563 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3565 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3566 if (optab_handler (lshr_optab, mode)->insn_code == CODE_FOR_nothing
3567 || rtx_cost (temp, SET, optimize_insn_for_speed_p ()) > COSTS_N_INSNS (2))
3569 temp = expand_binop (mode, xor_optab, op0, signmask,
3570 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3571 temp = expand_binop (mode, sub_optab, temp, signmask,
3572 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3573 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3574 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3575 temp = expand_binop (mode, xor_optab, temp, signmask,
3576 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3577 temp = expand_binop (mode, sub_optab, temp, signmask,
3578 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3580 else
3582 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3583 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3584 signmask = force_reg (mode, signmask);
3586 temp = expand_binop (mode, add_optab, op0, signmask,
3587 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3588 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3589 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3590 temp = expand_binop (mode, sub_optab, temp, signmask,
3591 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3593 return temp;
3597 /* Mask contains the mode's signbit and the significant bits of the
3598 modulus. By including the signbit in the operation, many targets
3599 can avoid an explicit compare operation in the following comparison
3600 against zero. */
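/* For instance, in a 32-bit mode with d == 8 the mask built below is
   0x80000007 (within the mode), so the AND keeps the sign bit alive and
   the GE test against zero that follows needs no separate compare on
   many targets.  */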
3602 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3603 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3605 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3606 maskhigh = -1;
3608 else
3609 maskhigh = (HOST_WIDE_INT) -1
3610 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3612 temp = expand_binop (mode, and_optab, op0,
3613 immed_double_const (masklow, maskhigh, mode),
3614 result, 1, OPTAB_LIB_WIDEN);
3615 if (temp != result)
3616 emit_move_insn (result, temp);
3618 label = gen_label_rtx ();
3619 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3621 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3622 0, OPTAB_LIB_WIDEN);
3623 masklow = (HOST_WIDE_INT) -1 << logd;
3624 maskhigh = -1;
3625 temp = expand_binop (mode, ior_optab, temp,
3626 immed_double_const (masklow, maskhigh, mode),
3627 result, 1, OPTAB_LIB_WIDEN);
3628 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3629 0, OPTAB_LIB_WIDEN);
3630 if (temp != result)
3631 emit_move_insn (result, temp);
3632 emit_label (label);
3633 return result;
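/* Illustrative sketch only (not GCC code) of the branch-free identity
   used above, written for a 32-bit int and d == 8, assuming two's
   complement wrap-around arithmetic and arithmetic right shifts of
   negative values.  */
#if 0
static int
example_smod8 (int x)
{
  int signmask = x >> 31;  /* -1 if x < 0, else 0.  */
  /* The "2 XORs, 2 SUBs and an AND" variant: fold X to its absolute
     value, mask, then restore the sign, matching C's truncating %.  */
  return ((((x ^ signmask) - signmask) & 7) ^ signmask) - signmask;
}
#endif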
3636 /* Expand signed division of OP0 by a power of two D in mode MODE.
3637 This routine is only called for positive values of D. */
3639 static rtx
3640 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3642 rtx temp, label;
3643 tree shift;
3644 int logd;
3646 logd = floor_log2 (d);
3647 shift = build_int_cst (NULL_TREE, logd);
3649 if (d == 2
3650 && BRANCH_COST (optimize_insn_for_speed_p (),
3651 false) >= 1)
3653 temp = gen_reg_rtx (mode);
3654 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3655 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3656 0, OPTAB_LIB_WIDEN);
3657 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3660 #ifdef HAVE_conditional_move
3661 if (BRANCH_COST (optimize_insn_for_speed_p (), false)
3662 >= 2)
3664 rtx temp2;
3666 /* ??? emit_conditional_move forces a stack adjustment via
3667 compare_from_rtx so, if the sequence is discarded, it will
3668 be lost. Do it now instead. */
3669 do_pending_stack_adjust ();
3671 start_sequence ();
3672 temp2 = copy_to_mode_reg (mode, op0);
3673 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3674 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3675 temp = force_reg (mode, temp);
3677 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3678 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3679 mode, temp, temp2, mode, 0);
3680 if (temp2)
3682 rtx seq = get_insns ();
3683 end_sequence ();
3684 emit_insn (seq);
3685 return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
3687 end_sequence ();
3689 #endif
3691 if (BRANCH_COST (optimize_insn_for_speed_p (),
3692 false) >= 2)
3694 int ushift = GET_MODE_BITSIZE (mode) - logd;
3696 temp = gen_reg_rtx (mode);
3697 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3698 if (shift_cost[optimize_insn_for_speed_p ()][mode][ushift] > COSTS_N_INSNS (1))
3699 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3700 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3701 else
3702 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3703 build_int_cst (NULL_TREE, ushift),
3704 NULL_RTX, 1);
3705 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3706 0, OPTAB_LIB_WIDEN);
3707 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3710 label = gen_label_rtx ();
3711 temp = copy_to_mode_reg (mode, op0);
3712 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3713 expand_inc (temp, GEN_INT (d - 1));
3714 emit_label (label);
3715 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
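/* Illustrative sketch only (not GCC code) of the bias-and-shift idea
   used above, for a 32-bit int and d == 8, assuming arithmetic right
   shifts of negative values.  */
#if 0
static int
example_sdiv8 (int x)
{
  int bias = (x >> 31) & 7;  /* d - 1 when x is negative, else 0.  */
  return (x + bias) >> 3;    /* Truncating division by 8.  */
}
#endif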
3718 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3719 if that is convenient, and returning where the result is.
3720 You may request either the quotient or the remainder as the result;
3721 specify REM_FLAG nonzero to get the remainder.
3723 CODE is the expression code for which kind of division this is;
3724 it controls how rounding is done. MODE is the machine mode to use.
3725 UNSIGNEDP nonzero means do unsigned division. */
3727 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3728 and then correct it by or'ing in missing high bits
3729 if result of ANDI is nonzero.
3730 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3731 This could optimize to a bfexts instruction.
3732 But C doesn't use these operations, so their optimizations are
3733 left for later. */
3734 /* ??? For modulo, we don't actually need the highpart of the first product,
3735 the low part will do nicely. And for small divisors, the second multiply
3736 can also be a low-part only multiply or even be completely left out.
3737 E.g. to calculate the remainder of a division by 3 with a 32 bit
3738 multiply, multiply with 0x55555556 and extract the upper two bits;
3739 the result is exact for inputs up to 0x1fffffff.
3740 The input range can be reduced by using cross-sum rules.
3741 For odd divisors >= 3, the following table gives right shift counts
3742 so that if a number is shifted by an integer multiple of the given
3743 amount, the remainder stays the same:
3744 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3745 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3746 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3747 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3748 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3750 Cross-sum rules for even numbers can be derived by leaving as many bits
3751 to the right alone as the divisor has zeros to the right.
3752 E.g. if x is an unsigned 32 bit number:
3753 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
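/* Worked example of the multiply trick above (illustration only, not
   generated code): with 32-bit unsigned arithmetic the top two bits of
   x * 0x55555556 are x mod 3, exact for x <= 0x1fffffff.  */
#if 0
static unsigned int
example_umod3 (unsigned int x)
{
  return (x * 0x55555556u) >> 30;
}
#endif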
3757 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3758 rtx op0, rtx op1, rtx target, int unsignedp)
3760 enum machine_mode compute_mode;
3761 rtx tquotient;
3762 rtx quotient = 0, remainder = 0;
3763 rtx last;
3764 int size;
3765 rtx insn, set;
3766 optab optab1, optab2;
3767 int op1_is_constant, op1_is_pow2 = 0;
3768 int max_cost, extra_cost;
3769 static HOST_WIDE_INT last_div_const = 0;
3770 static HOST_WIDE_INT ext_op1;
3771 bool speed = optimize_insn_for_speed_p ();
3773 op1_is_constant = GET_CODE (op1) == CONST_INT;
3774 if (op1_is_constant)
3776 ext_op1 = INTVAL (op1);
3777 if (unsignedp)
3778 ext_op1 &= GET_MODE_MASK (mode);
3779 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3780 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3784 This is the structure of expand_divmod:
3786 First comes code to fix up the operands so we can perform the operations
3787 correctly and efficiently.
3789 Second comes a switch statement with code specific for each rounding mode.
3790 For some special operands this code emits all RTL for the desired
3791 operation; for other cases, it generates only a quotient and stores it in
3792 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3793 to indicate that it has not done anything.
3795 Last comes code that finishes the operation. If QUOTIENT is set and
3796 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3797 QUOTIENT is not set, it is computed using trunc rounding.
3799 We try to generate special code for division and remainder when OP1 is a
3800 constant. If |OP1| = 2**n we can use shifts and some other fast
3801 operations. For other values of OP1, we compute a carefully selected
3802 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3803 by m.
3805 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3806 half of the product. Different strategies for generating the product are
3807 implemented in expand_mult_highpart.
3809 If what we actually want is the remainder, we generate that by another
3810 by-constant multiplication and a subtraction. */
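/* As a concrete illustration (not emitted literally): for an unsigned
   32-bit division by 3 the fixed-point approximation is m = 0xAAAAAAAB,
   about 2**33 / 3, and

     quotient = (unsigned int) (((unsigned long long) op0 * 0xAAAAAAABu) >> 33)

   equals op0 / 3 for every 32-bit op0; when the remainder is wanted it
   follows as op0 - quotient * 3.  */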
3812 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3813 code below will malfunction if we are, so check here and handle
3814 the special case if so. */
3815 if (op1 == const1_rtx)
3816 return rem_flag ? const0_rtx : op0;
3818 /* When dividing by -1, we could get an overflow.
3819 negv_optab can handle overflows. */
3820 if (! unsignedp && op1 == constm1_rtx)
3822 if (rem_flag)
3823 return const0_rtx;
3824 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3825 ? negv_optab : neg_optab, op0, target, 0);
3828 if (target
3829 /* Don't use the function value register as a target
3830 since we have to read it as well as write it,
3831 and function-inlining gets confused by this. */
3832 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3833 /* Don't clobber an operand while doing a multi-step calculation. */
3834 || ((rem_flag || op1_is_constant)
3835 && (reg_mentioned_p (target, op0)
3836 || (MEM_P (op0) && MEM_P (target))))
3837 || reg_mentioned_p (target, op1)
3838 || (MEM_P (op1) && MEM_P (target))))
3839 target = 0;
3841 /* Get the mode in which to perform this computation. Normally it will
3842 be MODE, but sometimes we can't do the desired operation in MODE.
3843 If so, pick a wider mode in which we can do the operation. Convert
3844 to that mode at the start to avoid repeated conversions.
3846 First see what operations we need. These depend on the expression
3847 we are evaluating. (We assume that divxx3 insns exist under the
3848 same conditions as modxx3 insns and that these insns don't normally
3849 fail. If these assumptions are not correct, we may generate less
3850 efficient code in some cases.)
3852 Then see if we find a mode in which we can open-code that operation
3853 (either a division, modulus, or shift). Finally, check for the smallest
3854 mode for which we can do the operation with a library call. */
3856 /* We might want to refine this now that we have division-by-constant
3857 optimization. Since expand_mult_highpart tries so many variants, it is
3858 not straightforward to generalize this. Maybe we should make an array
3859 of possible modes in init_expmed? Save this for GCC 2.7. */
3861 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3862 ? (unsignedp ? lshr_optab : ashr_optab)
3863 : (unsignedp ? udiv_optab : sdiv_optab));
3864 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3865 ? optab1
3866 : (unsignedp ? udivmod_optab : sdivmod_optab));
3868 for (compute_mode = mode; compute_mode != VOIDmode;
3869 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3870 if (optab_handler (optab1, compute_mode)->insn_code != CODE_FOR_nothing
3871 || optab_handler (optab2, compute_mode)->insn_code != CODE_FOR_nothing)
3872 break;
3874 if (compute_mode == VOIDmode)
3875 for (compute_mode = mode; compute_mode != VOIDmode;
3876 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3877 if (optab_libfunc (optab1, compute_mode)
3878 || optab_libfunc (optab2, compute_mode))
3879 break;
3881 /* If we still couldn't find a mode, use MODE, but expand_binop will
3882 probably die. */
3883 if (compute_mode == VOIDmode)
3884 compute_mode = mode;
3886 if (target && GET_MODE (target) == compute_mode)
3887 tquotient = target;
3888 else
3889 tquotient = gen_reg_rtx (compute_mode);
3891 size = GET_MODE_BITSIZE (compute_mode);
3892 #if 0
3893 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3894 (mode), and thereby get better code when OP1 is a constant. Do that
3895 later. It will require going over all usages of SIZE below. */
3896 size = GET_MODE_BITSIZE (mode);
3897 #endif
3899 /* Only deduct something for a REM if the last divide done was
3900 for a different constant. Then set the constant of the last
3901 divide. */
3902 max_cost = unsignedp ? udiv_cost[speed][compute_mode] : sdiv_cost[speed][compute_mode];
3903 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
3904 && INTVAL (op1) == last_div_const))
3905 max_cost -= mul_cost[speed][compute_mode] + add_cost[speed][compute_mode];
3907 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3909 /* Now convert to the best mode to use. */
3910 if (compute_mode != mode)
3912 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3913 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3915 /* convert_modes may have placed op1 into a register, so we
3916 must recompute the following. */
3917 op1_is_constant = GET_CODE (op1) == CONST_INT;
3918 op1_is_pow2 = (op1_is_constant
3919 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3920 || (! unsignedp
3921 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
3924 /* If one of the operands is a volatile MEM, copy it into a register. */
3926 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3927 op0 = force_reg (compute_mode, op0);
3928 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3929 op1 = force_reg (compute_mode, op1);
3931 /* If we need the remainder or if OP1 is constant, we need to
3932 put OP0 in a register in case it has any queued subexpressions. */
3933 if (rem_flag || op1_is_constant)
3934 op0 = force_reg (compute_mode, op0);
3936 last = get_last_insn ();
3938 /* Promote floor rounding to trunc rounding for unsigned operations. */
3939 if (unsignedp)
3941 if (code == FLOOR_DIV_EXPR)
3942 code = TRUNC_DIV_EXPR;
3943 if (code == FLOOR_MOD_EXPR)
3944 code = TRUNC_MOD_EXPR;
3945 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3946 code = TRUNC_DIV_EXPR;
3949 if (op1 != const0_rtx)
3950 switch (code)
3952 case TRUNC_MOD_EXPR:
3953 case TRUNC_DIV_EXPR:
3954 if (op1_is_constant)
3956 if (unsignedp)
3958 unsigned HOST_WIDE_INT mh;
3959 int pre_shift, post_shift;
3960 int dummy;
3961 rtx ml;
3962 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3963 & GET_MODE_MASK (compute_mode));
3965 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3967 pre_shift = floor_log2 (d);
3968 if (rem_flag)
3970 remainder
3971 = expand_binop (compute_mode, and_optab, op0,
3972 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3973 remainder, 1,
3974 OPTAB_LIB_WIDEN);
3975 if (remainder)
3976 return gen_lowpart (mode, remainder);
3978 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3979 build_int_cst (NULL_TREE,
3980 pre_shift),
3981 tquotient, 1);
3983 else if (size <= HOST_BITS_PER_WIDE_INT)
3985 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3987 /* Most significant bit of divisor is set; emit an scc
3988 insn. */
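/* For example, with a 32-bit mode and d == 0xC0000000, op0 / d is 1
   exactly when op0 >= d and 0 otherwise, so a single GEU store-flag
   suffices.  */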
3989 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3990 compute_mode, 1, 1);
3991 if (quotient == 0)
3992 goto fail1;
3994 else
3996 /* Find a suitable multiplier and right shift count
3997 instead of multiplying with D. */
3999 mh = choose_multiplier (d, size, size,
4000 &ml, &post_shift, &dummy);
4002 /* If the suggested multiplier is more than SIZE bits,
4003 we can do better for even divisors, using an
4004 initial right shift. */
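/* E.g. for d == 28 the divisor is first reduced to 7 by a right
   shift of 2, and a multiplier is then chosen for that narrower
   problem.  */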
4005 if (mh != 0 && (d & 1) == 0)
4007 pre_shift = floor_log2 (d & -d);
4008 mh = choose_multiplier (d >> pre_shift, size,
4009 size - pre_shift,
4010 &ml, &post_shift, &dummy);
4011 gcc_assert (!mh);
4013 else
4014 pre_shift = 0;
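/* In C terms, when MH != 0 the branch below computes
     t1 = MULUH (op0, ml);
     quotient = (t1 + ((op0 - t1) >> 1)) >> (post_shift - 1);
   where MULUH is the high half of the unsigned SIZE x SIZE product
   and both shifts are logical.  */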
4016 if (mh != 0)
4018 rtx t1, t2, t3, t4;
4020 if (post_shift - 1 >= BITS_PER_WORD)
4021 goto fail1;
4023 extra_cost
4024 = (shift_cost[speed][compute_mode][post_shift - 1]
4025 + shift_cost[speed][compute_mode][1]
4026 + 2 * add_cost[speed][compute_mode]);
4027 t1 = expand_mult_highpart (compute_mode, op0, ml,
4028 NULL_RTX, 1,
4029 max_cost - extra_cost);
4030 if (t1 == 0)
4031 goto fail1;
4032 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4033 op0, t1),
4034 NULL_RTX);
4035 t3 = expand_shift
4036 (RSHIFT_EXPR, compute_mode, t2,
4037 build_int_cst (NULL_TREE, 1),
4038 NULL_RTX,1);
4039 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4040 t1, t3),
4041 NULL_RTX);
4042 quotient = expand_shift
4043 (RSHIFT_EXPR, compute_mode, t4,
4044 build_int_cst (NULL_TREE, post_shift - 1),
4045 tquotient, 1);
4047 else
4049 rtx t1, t2;
4051 if (pre_shift >= BITS_PER_WORD
4052 || post_shift >= BITS_PER_WORD)
4053 goto fail1;
4055 t1 = expand_shift
4056 (RSHIFT_EXPR, compute_mode, op0,
4057 build_int_cst (NULL_TREE, pre_shift),
4058 NULL_RTX, 1);
4059 extra_cost
4060 = (shift_cost[speed][compute_mode][pre_shift]
4061 + shift_cost[speed][compute_mode][post_shift]);
4062 t2 = expand_mult_highpart (compute_mode, t1, ml,
4063 NULL_RTX, 1,
4064 max_cost - extra_cost);
4065 if (t2 == 0)
4066 goto fail1;
4067 quotient = expand_shift
4068 (RSHIFT_EXPR, compute_mode, t2,
4069 build_int_cst (NULL_TREE, post_shift),
4070 tquotient, 1);
4074 else /* Too wide mode to use tricky code */
4075 break;
4077 insn = get_last_insn ();
4078 if (insn != last
4079 && (set = single_set (insn)) != 0
4080 && SET_DEST (set) == quotient)
4081 set_unique_reg_note (insn,
4082 REG_EQUAL,
4083 gen_rtx_UDIV (compute_mode, op0, op1));
4085 else /* TRUNC_DIV, signed */
4087 unsigned HOST_WIDE_INT ml;
4088 int lgup, post_shift;
4089 rtx mlr;
4090 HOST_WIDE_INT d = INTVAL (op1);
4091 unsigned HOST_WIDE_INT abs_d;
4093 /* Since d might be INT_MIN, we have to cast to
4094 unsigned HOST_WIDE_INT before negating to avoid
4095 undefined signed overflow. */
4096 abs_d = (d >= 0
4097 ? (unsigned HOST_WIDE_INT) d
4098 : - (unsigned HOST_WIDE_INT) d);
4100 /* n rem d = n rem -d */
4101 if (rem_flag && d < 0)
4103 d = abs_d;
4104 op1 = gen_int_mode (abs_d, compute_mode);
4107 if (d == 1)
4108 quotient = op0;
4109 else if (d == -1)
4110 quotient = expand_unop (compute_mode, neg_optab, op0,
4111 tquotient, 0);
4112 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4114 /* This case is not handled correctly below. */
4115 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4116 compute_mode, 1, 1);
4117 if (quotient == 0)
4118 goto fail1;
4120 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4121 && (rem_flag ? smod_pow2_cheap[speed][compute_mode]
4122 : sdiv_pow2_cheap[speed][compute_mode])
4123 /* We assume that the cheap metric is true if the
4124 optab has an expander for this mode. */
4125 && ((optab_handler ((rem_flag ? smod_optab
4126 : sdiv_optab),
4127 compute_mode)->insn_code
4128 != CODE_FOR_nothing)
4129 || (optab_handler(sdivmod_optab,
4130 compute_mode)
4131 ->insn_code != CODE_FOR_nothing)))
4133 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4135 if (rem_flag)
4137 remainder = expand_smod_pow2 (compute_mode, op0, d);
4138 if (remainder)
4139 return gen_lowpart (mode, remainder);
4142 if (sdiv_pow2_cheap[speed][compute_mode]
4143 && ((optab_handler (sdiv_optab, compute_mode)->insn_code
4144 != CODE_FOR_nothing)
4145 || (optab_handler (sdivmod_optab, compute_mode)->insn_code
4146 != CODE_FOR_nothing)))
4147 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4148 compute_mode, op0,
4149 gen_int_mode (abs_d,
4150 compute_mode),
4151 NULL_RTX, 0);
4152 else
4153 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4155 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4156 negate the quotient. */
4157 if (d < 0)
4159 insn = get_last_insn ();
4160 if (insn != last
4161 && (set = single_set (insn)) != 0
4162 && SET_DEST (set) == quotient
4163 && abs_d < ((unsigned HOST_WIDE_INT) 1
4164 << (HOST_BITS_PER_WIDE_INT - 1)))
4165 set_unique_reg_note (insn,
4166 REG_EQUAL,
4167 gen_rtx_DIV (compute_mode,
4168 op0,
4169 GEN_INT
4170 (trunc_int_for_mode
4171 (abs_d,
4172 compute_mode))));
4174 quotient = expand_unop (compute_mode, neg_optab,
4175 quotient, quotient, 0);
4178 else if (size <= HOST_BITS_PER_WIDE_INT)
4180 choose_multiplier (abs_d, size, size - 1,
4181 &mlr, &post_shift, &lgup);
4182 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
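/* When ML fits below the sign bit, the branch below computes
     quotient = (MULSH (op0, ml) >> post_shift) - (op0 >> (size - 1))
   with arithmetic shifts, MULSH being the high half of the signed
   product (the subtraction is reversed when D < 0); otherwise ML is
   sign-adjusted and OP0 is added back to the high-part product
   before the shifts.  */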
4183 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4185 rtx t1, t2, t3;
4187 if (post_shift >= BITS_PER_WORD
4188 || size - 1 >= BITS_PER_WORD)
4189 goto fail1;
4191 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4192 + shift_cost[speed][compute_mode][size - 1]
4193 + add_cost[speed][compute_mode]);
4194 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4195 NULL_RTX, 0,
4196 max_cost - extra_cost);
4197 if (t1 == 0)
4198 goto fail1;
4199 t2 = expand_shift
4200 (RSHIFT_EXPR, compute_mode, t1,
4201 build_int_cst (NULL_TREE, post_shift),
4202 NULL_RTX, 0);
4203 t3 = expand_shift
4204 (RSHIFT_EXPR, compute_mode, op0,
4205 build_int_cst (NULL_TREE, size - 1),
4206 NULL_RTX, 0);
4207 if (d < 0)
4208 quotient
4209 = force_operand (gen_rtx_MINUS (compute_mode,
4210 t3, t2),
4211 tquotient);
4212 else
4213 quotient
4214 = force_operand (gen_rtx_MINUS (compute_mode,
4215 t2, t3),
4216 tquotient);
4218 else
4220 rtx t1, t2, t3, t4;
4222 if (post_shift >= BITS_PER_WORD
4223 || size - 1 >= BITS_PER_WORD)
4224 goto fail1;
4226 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4227 mlr = gen_int_mode (ml, compute_mode);
4228 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4229 + shift_cost[speed][compute_mode][size - 1]
4230 + 2 * add_cost[speed][compute_mode]);
4231 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4232 NULL_RTX, 0,
4233 max_cost - extra_cost);
4234 if (t1 == 0)
4235 goto fail1;
4236 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4237 t1, op0),
4238 NULL_RTX);
4239 t3 = expand_shift
4240 (RSHIFT_EXPR, compute_mode, t2,
4241 build_int_cst (NULL_TREE, post_shift),
4242 NULL_RTX, 0);
4243 t4 = expand_shift
4244 (RSHIFT_EXPR, compute_mode, op0,
4245 build_int_cst (NULL_TREE, size - 1),
4246 NULL_RTX, 0);
4247 if (d < 0)
4248 quotient
4249 = force_operand (gen_rtx_MINUS (compute_mode,
4250 t4, t3),
4251 tquotient);
4252 else
4253 quotient
4254 = force_operand (gen_rtx_MINUS (compute_mode,
4255 t3, t4),
4256 tquotient);
4259 else /* Too wide mode to use tricky code */
4260 break;
4262 insn = get_last_insn ();
4263 if (insn != last
4264 && (set = single_set (insn)) != 0
4265 && SET_DEST (set) == quotient)
4266 set_unique_reg_note (insn,
4267 REG_EQUAL,
4268 gen_rtx_DIV (compute_mode, op0, op1));
4270 break;
4272 fail1:
4273 delete_insns_since (last);
4274 break;
4276 case FLOOR_DIV_EXPR:
4277 case FLOOR_MOD_EXPR:
4278 /* We will come here only for signed operations. */
4279 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4281 unsigned HOST_WIDE_INT mh;
4282 int pre_shift, lgup, post_shift;
4283 HOST_WIDE_INT d = INTVAL (op1);
4284 rtx ml;
4286 if (d > 0)
4288 /* We could just as easily deal with negative constants here,
4289 but it does not seem worth the trouble for GCC 2.6. */
4290 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4292 pre_shift = floor_log2 (d);
4293 if (rem_flag)
4295 remainder = expand_binop (compute_mode, and_optab, op0,
4296 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4297 remainder, 0, OPTAB_LIB_WIDEN);
4298 if (remainder)
4299 return gen_lowpart (mode, remainder);
4301 quotient = expand_shift
4302 (RSHIFT_EXPR, compute_mode, op0,
4303 build_int_cst (NULL_TREE, pre_shift),
4304 tquotient, 0);
4306 else
4308 rtx t1, t2, t3, t4;
4310 mh = choose_multiplier (d, size, size - 1,
4311 &ml, &post_shift, &lgup);
4312 gcc_assert (!mh);
4314 if (post_shift < BITS_PER_WORD
4315 && size - 1 < BITS_PER_WORD)
4317 t1 = expand_shift
4318 (RSHIFT_EXPR, compute_mode, op0,
4319 build_int_cst (NULL_TREE, size - 1),
4320 NULL_RTX, 0);
4321 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4322 NULL_RTX, 0, OPTAB_WIDEN);
4323 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4324 + shift_cost[speed][compute_mode][size - 1]
4325 + 2 * add_cost[speed][compute_mode]);
4326 t3 = expand_mult_highpart (compute_mode, t2, ml,
4327 NULL_RTX, 1,
4328 max_cost - extra_cost);
4329 if (t3 != 0)
4331 t4 = expand_shift
4332 (RSHIFT_EXPR, compute_mode, t3,
4333 build_int_cst (NULL_TREE, post_shift),
4334 NULL_RTX, 1);
4335 quotient = expand_binop (compute_mode, xor_optab,
4336 t4, t1, tquotient, 0,
4337 OPTAB_WIDEN);
4342 else
4344 rtx nsign, t1, t2, t3, t4;
4345 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4346 op0, constm1_rtx), NULL_RTX);
4347 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4348 0, OPTAB_WIDEN);
4349 nsign = expand_shift
4350 (RSHIFT_EXPR, compute_mode, t2,
4351 build_int_cst (NULL_TREE, size - 1),
4352 NULL_RTX, 0);
4353 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4354 NULL_RTX);
4355 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4356 NULL_RTX, 0);
4357 if (t4)
4359 rtx t5;
4360 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4361 NULL_RTX, 0);
4362 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4363 t4, t5),
4364 tquotient);
4369 if (quotient != 0)
4370 break;
4371 delete_insns_since (last);
4373 /* Try using an instruction that produces both the quotient and
4374 remainder, using truncation. We can easily compensate the quotient
4375 or remainder to get floor rounding, once we have the remainder.
4376 Notice that we compute also the final remainder value here,
4377 and return the result right away. */
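/* For instance, -7 / 2 truncates to -3 with remainder -1; the floor
   result is -4 with remainder 1.  That is, subtract one from the
   quotient and add the divisor to the remainder whenever the
   remainder is nonzero and the operands have opposite signs.  */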
4378 if (target == 0 || GET_MODE (target) != compute_mode)
4379 target = gen_reg_rtx (compute_mode);
4381 if (rem_flag)
4383 remainder
4384 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4385 quotient = gen_reg_rtx (compute_mode);
4387 else
4389 quotient
4390 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4391 remainder = gen_reg_rtx (compute_mode);
4394 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4395 quotient, remainder, 0))
4397 /* This could be computed with a branch-less sequence.
4398 Save that for later. */
4399 rtx tem;
4400 rtx label = gen_label_rtx ();
4401 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4402 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4403 NULL_RTX, 0, OPTAB_WIDEN);
4404 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4405 expand_dec (quotient, const1_rtx);
4406 expand_inc (remainder, op1);
4407 emit_label (label);
4408 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4411 /* No luck with division elimination or divmod. Have to do it
4412 by conditionally adjusting op0 *and* the result. */
4414 rtx label1, label2, label3, label4, label5;
4415 rtx adjusted_op0;
4416 rtx tem;
4418 quotient = gen_reg_rtx (compute_mode);
4419 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4420 label1 = gen_label_rtx ();
4421 label2 = gen_label_rtx ();
4422 label3 = gen_label_rtx ();
4423 label4 = gen_label_rtx ();
4424 label5 = gen_label_rtx ();
4425 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4426 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4427 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4428 quotient, 0, OPTAB_LIB_WIDEN);
4429 if (tem != quotient)
4430 emit_move_insn (quotient, tem);
4431 emit_jump_insn (gen_jump (label5));
4432 emit_barrier ();
4433 emit_label (label1);
4434 expand_inc (adjusted_op0, const1_rtx);
4435 emit_jump_insn (gen_jump (label4));
4436 emit_barrier ();
4437 emit_label (label2);
4438 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4439 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4440 quotient, 0, OPTAB_LIB_WIDEN);
4441 if (tem != quotient)
4442 emit_move_insn (quotient, tem);
4443 emit_jump_insn (gen_jump (label5));
4444 emit_barrier ();
4445 emit_label (label3);
4446 expand_dec (adjusted_op0, const1_rtx);
4447 emit_label (label4);
4448 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4449 quotient, 0, OPTAB_LIB_WIDEN);
4450 if (tem != quotient)
4451 emit_move_insn (quotient, tem);
4452 expand_dec (quotient, const1_rtx);
4453 emit_label (label5);
4455 break;
4457 case CEIL_DIV_EXPR:
4458 case CEIL_MOD_EXPR:
4459 if (unsignedp)
4461 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4463 rtx t1, t2, t3;
4464 unsigned HOST_WIDE_INT d = INTVAL (op1);
4465 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4466 build_int_cst (NULL_TREE, floor_log2 (d)),
4467 tquotient, 1);
4468 t2 = expand_binop (compute_mode, and_optab, op0,
4469 GEN_INT (d - 1),
4470 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4471 t3 = gen_reg_rtx (compute_mode);
4472 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4473 compute_mode, 1, 1);
4474 if (t3 == 0)
4476 rtx lab;
4477 lab = gen_label_rtx ();
4478 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4479 expand_inc (t1, const1_rtx);
4480 emit_label (lab);
4481 quotient = t1;
4483 else
4484 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4485 t1, t3),
4486 tquotient);
4487 break;
4490 /* Try using an instruction that produces both the quotient and
4491 remainder, using truncation. We can easily compensate the
4492 quotient or remainder to get ceiling rounding, once we have the
4493 remainder. Notice that we compute also the final remainder
4494 value here, and return the result right away. */
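/* For instance, an unsigned 7 / 2 truncates to 3 with remainder 1;
   the ceiling result is 4 with remainder -1.  That is, bump the
   quotient and subtract the divisor from the remainder whenever the
   remainder is nonzero.  */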
4495 if (target == 0 || GET_MODE (target) != compute_mode)
4496 target = gen_reg_rtx (compute_mode);
4498 if (rem_flag)
4500 remainder = (REG_P (target)
4501 ? target : gen_reg_rtx (compute_mode));
4502 quotient = gen_reg_rtx (compute_mode);
4504 else
4506 quotient = (REG_P (target)
4507 ? target : gen_reg_rtx (compute_mode));
4508 remainder = gen_reg_rtx (compute_mode);
4511 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4512 remainder, 1))
4514 /* This could be computed with a branch-less sequence.
4515 Save that for later. */
4516 rtx label = gen_label_rtx ();
4517 do_cmp_and_jump (remainder, const0_rtx, EQ,
4518 compute_mode, label);
4519 expand_inc (quotient, const1_rtx);
4520 expand_dec (remainder, op1);
4521 emit_label (label);
4522 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4525 /* No luck with division elimination or divmod. Have to do it
4526 by conditionally adjusting op0 *and* the result. */
4528 rtx label1, label2;
4529 rtx adjusted_op0, tem;
4531 quotient = gen_reg_rtx (compute_mode);
4532 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4533 label1 = gen_label_rtx ();
4534 label2 = gen_label_rtx ();
4535 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4536 compute_mode, label1);
4537 emit_move_insn (quotient, const0_rtx);
4538 emit_jump_insn (gen_jump (label2));
4539 emit_barrier ();
4540 emit_label (label1);
4541 expand_dec (adjusted_op0, const1_rtx);
4542 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4543 quotient, 1, OPTAB_LIB_WIDEN);
4544 if (tem != quotient)
4545 emit_move_insn (quotient, tem);
4546 expand_inc (quotient, const1_rtx);
4547 emit_label (label2);
4550 else /* signed */
4552 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4553 && INTVAL (op1) >= 0)
4555 /* This is extremely similar to the code for the unsigned case
4556 above. For 2.7 we should merge these variants, but for
4557 2.6.1 I don't want to touch the code for unsigned since that
4558 get used in C. The signed case will only be used by other
4559 languages (Ada). */
4561 rtx t1, t2, t3;
4562 unsigned HOST_WIDE_INT d = INTVAL (op1);
4563 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4564 build_int_cst (NULL_TREE, floor_log2 (d)),
4565 tquotient, 0);
4566 t2 = expand_binop (compute_mode, and_optab, op0,
4567 GEN_INT (d - 1),
4568 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4569 t3 = gen_reg_rtx (compute_mode);
4570 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4571 compute_mode, 1, 1);
4572 if (t3 == 0)
4574 rtx lab;
4575 lab = gen_label_rtx ();
4576 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4577 expand_inc (t1, const1_rtx);
4578 emit_label (lab);
4579 quotient = t1;
4581 else
4582 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4583 t1, t3),
4584 tquotient);
4585 break;
4588 /* Try using an instruction that produces both the quotient and
4589 remainder, using truncation. We can easily compensate the
4590 quotient or remainder to get ceiling rounding, once we have the
4591 remainder. Notice that we compute also the final remainder
4592 value here, and return the result right away. */
4593 if (target == 0 || GET_MODE (target) != compute_mode)
4594 target = gen_reg_rtx (compute_mode);
4595 if (rem_flag)
4597 remainder = (REG_P (target)
4598 ? target : gen_reg_rtx (compute_mode));
4599 quotient = gen_reg_rtx (compute_mode);
4601 else
4603 quotient = (REG_P (target)
4604 ? target : gen_reg_rtx (compute_mode));
4605 remainder = gen_reg_rtx (compute_mode);
4608 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4609 remainder, 0))
4611 /* This could be computed with a branch-less sequence.
4612 Save that for later. */
4613 rtx tem;
4614 rtx label = gen_label_rtx ();
4615 do_cmp_and_jump (remainder, const0_rtx, EQ,
4616 compute_mode, label);
4617 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4618 NULL_RTX, 0, OPTAB_WIDEN);
4619 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4620 expand_inc (quotient, const1_rtx);
4621 expand_dec (remainder, op1);
4622 emit_label (label);
4623 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4626 /* No luck with division elimination or divmod. Have to do it
4627 by conditionally adjusting op0 *and* the result. */
4629 rtx label1, label2, label3, label4, label5;
4630 rtx adjusted_op0;
4631 rtx tem;
4633 quotient = gen_reg_rtx (compute_mode);
4634 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4635 label1 = gen_label_rtx ();
4636 label2 = gen_label_rtx ();
4637 label3 = gen_label_rtx ();
4638 label4 = gen_label_rtx ();
4639 label5 = gen_label_rtx ();
4640 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4641 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4642 compute_mode, label1);
4643 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4644 quotient, 0, OPTAB_LIB_WIDEN);
4645 if (tem != quotient)
4646 emit_move_insn (quotient, tem);
4647 emit_jump_insn (gen_jump (label5));
4648 emit_barrier ();
4649 emit_label (label1);
4650 expand_dec (adjusted_op0, const1_rtx);
4651 emit_jump_insn (gen_jump (label4));
4652 emit_barrier ();
4653 emit_label (label2);
4654 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4655 compute_mode, label3);
4656 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4657 quotient, 0, OPTAB_LIB_WIDEN);
4658 if (tem != quotient)
4659 emit_move_insn (quotient, tem);
4660 emit_jump_insn (gen_jump (label5));
4661 emit_barrier ();
4662 emit_label (label3);
4663 expand_inc (adjusted_op0, const1_rtx);
4664 emit_label (label4);
4665 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4666 quotient, 0, OPTAB_LIB_WIDEN);
4667 if (tem != quotient)
4668 emit_move_insn (quotient, tem);
4669 expand_inc (quotient, const1_rtx);
4670 emit_label (label5);
4673 break;
4675 case EXACT_DIV_EXPR:
4676 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4678 HOST_WIDE_INT d = INTVAL (op1);
4679 unsigned HOST_WIDE_INT ml;
4680 int pre_shift;
4681 rtx t1;
4683 pre_shift = floor_log2 (d & -d);
4684 ml = invert_mod2n (d >> pre_shift, size);
4685 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4686 build_int_cst (NULL_TREE, pre_shift),
4687 NULL_RTX, unsignedp);
4688 quotient = expand_mult (compute_mode, t1,
4689 gen_int_mode (ml, compute_mode),
4690 NULL_RTX, 1);
4692 insn = get_last_insn ();
4693 set_unique_reg_note (insn,
4694 REG_EQUAL,
4695 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4696 compute_mode,
4697 op0, op1));
4699 break;
4701 case ROUND_DIV_EXPR:
4702 case ROUND_MOD_EXPR:
4703 if (unsignedp)
4705 rtx tem;
4706 rtx label;
4707 label = gen_label_rtx ();
4708 quotient = gen_reg_rtx (compute_mode);
4709 remainder = gen_reg_rtx (compute_mode);
4710 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4712 rtx tem;
4713 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4714 quotient, 1, OPTAB_LIB_WIDEN);
4715 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4716 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4717 remainder, 1, OPTAB_LIB_WIDEN);
4719 tem = plus_constant (op1, -1);
4720 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4721 build_int_cst (NULL_TREE, 1),
4722 NULL_RTX, 1);
4723 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4724 expand_inc (quotient, const1_rtx);
4725 expand_dec (remainder, op1);
4726 emit_label (label);
4728 else
4730 rtx abs_rem, abs_op1, tem, mask;
4731 rtx label;
4732 label = gen_label_rtx ();
4733 quotient = gen_reg_rtx (compute_mode);
4734 remainder = gen_reg_rtx (compute_mode);
4735 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4737 rtx tem;
4738 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4739 quotient, 0, OPTAB_LIB_WIDEN);
4740 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4741 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4742 remainder, 0, OPTAB_LIB_WIDEN);
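/* Round to nearest: if 2 * |remainder| >= |op1|, move the quotient
   one step away from zero in the direction given by the sign of
   op0 ^ op1 and correct the remainder by the divisor accordingly;
   the mask arithmetic below does this without a branch.  */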
4744 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4745 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4746 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4747 build_int_cst (NULL_TREE, 1),
4748 NULL_RTX, 1);
4749 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4750 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4751 NULL_RTX, 0, OPTAB_WIDEN);
4752 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4753 build_int_cst (NULL_TREE, size - 1),
4754 NULL_RTX, 0);
4755 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4756 NULL_RTX, 0, OPTAB_WIDEN);
4757 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4758 NULL_RTX, 0, OPTAB_WIDEN);
4759 expand_inc (quotient, tem);
4760 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4761 NULL_RTX, 0, OPTAB_WIDEN);
4762 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4763 NULL_RTX, 0, OPTAB_WIDEN);
4764 expand_dec (remainder, tem);
4765 emit_label (label);
4767 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4769 default:
4770 gcc_unreachable ();
4773 if (quotient == 0)
4775 if (target && GET_MODE (target) != compute_mode)
4776 target = 0;
4778 if (rem_flag)
4780 /* Try to produce the remainder without producing the quotient.
4781 If we seem to have a divmod pattern that does not require widening,
4782 don't try widening here. We should really have a WIDEN argument
4783 to expand_twoval_binop, since what we'd really like to do here is
4784 1) try a mod insn in compute_mode
4785 2) try a divmod insn in compute_mode
4786 3) try a div insn in compute_mode and multiply-subtract to get
4787 remainder
4788 4) try the same things with widening allowed. */
4789 remainder
4790 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4791 op0, op1, target,
4792 unsignedp,
4793 ((optab_handler (optab2, compute_mode)->insn_code
4794 != CODE_FOR_nothing)
4795 ? OPTAB_DIRECT : OPTAB_WIDEN));
4796 if (remainder == 0)
4798 /* No luck there. Can we do remainder and divide at once
4799 without a library call? */
4800 remainder = gen_reg_rtx (compute_mode);
4801 if (! expand_twoval_binop ((unsignedp
4802 ? udivmod_optab
4803 : sdivmod_optab),
4804 op0, op1,
4805 NULL_RTX, remainder, unsignedp))
4806 remainder = 0;
4809 if (remainder)
4810 return gen_lowpart (mode, remainder);
4813 /* Produce the quotient. Try a quotient insn, but not a library call.
4814 If we have a divmod in this mode, use it in preference to widening
4815 the div (for this test we assume it will not fail). Note that optab2
4816 is set to the one of the two optabs that the call below will use. */
4817 quotient
4818 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4819 op0, op1, rem_flag ? NULL_RTX : target,
4820 unsignedp,
4821 ((optab_handler (optab2, compute_mode)->insn_code
4822 != CODE_FOR_nothing)
4823 ? OPTAB_DIRECT : OPTAB_WIDEN));
4825 if (quotient == 0)
4827 /* No luck there. Try a quotient-and-remainder insn,
4828 keeping the quotient alone. */
4829 quotient = gen_reg_rtx (compute_mode);
4830 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4831 op0, op1,
4832 quotient, NULL_RTX, unsignedp))
4834 quotient = 0;
4835 if (! rem_flag)
4836 /* Still no luck. If we are not computing the remainder,
4837 use a library call for the quotient. */
4838 quotient = sign_expand_binop (compute_mode,
4839 udiv_optab, sdiv_optab,
4840 op0, op1, target,
4841 unsignedp, OPTAB_LIB_WIDEN);
4846 if (rem_flag)
4848 if (target && GET_MODE (target) != compute_mode)
4849 target = 0;
4851 if (quotient == 0)
4853 /* No divide instruction either. Use library for remainder. */
4854 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4855 op0, op1, target,
4856 unsignedp, OPTAB_LIB_WIDEN);
4857 /* No remainder function. Try a quotient-and-remainder
4858 function, keeping the remainder. */
4859 if (!remainder)
4861 remainder = gen_reg_rtx (compute_mode);
4862 if (!expand_twoval_binop_libfunc
4863 (unsignedp ? udivmod_optab : sdivmod_optab,
4864 op0, op1,
4865 NULL_RTX, remainder,
4866 unsignedp ? UMOD : MOD))
4867 remainder = NULL_RTX;
4870 else
4872 /* We divided. Now finish doing X - Y * (X / Y). */
4873 remainder = expand_mult (compute_mode, quotient, op1,
4874 NULL_RTX, unsignedp);
4875 remainder = expand_binop (compute_mode, sub_optab, op0,
4876 remainder, target, unsignedp,
4877 OPTAB_LIB_WIDEN);
4881 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4884 /* Return a tree node with data type TYPE, describing the value of X.
4885 Usually this is a VAR_DECL, if there is no obvious better choice.
4886 X may be an expression; however, we only support those expressions
4887 generated by loop.c. */
4889 tree
4890 make_tree (tree type, rtx x)
4892 tree t;
4894 switch (GET_CODE (x))
4896 case CONST_INT:
4898 HOST_WIDE_INT hi = 0;
4900 if (INTVAL (x) < 0
4901 && !(TYPE_UNSIGNED (type)
4902 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4903 < HOST_BITS_PER_WIDE_INT)))
4904 hi = -1;
4906 t = build_int_cst_wide (type, INTVAL (x), hi);
4908 return t;
4911 case CONST_DOUBLE:
4912 if (GET_MODE (x) == VOIDmode)
4913 t = build_int_cst_wide (type,
4914 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4915 else
4917 REAL_VALUE_TYPE d;
4919 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4920 t = build_real (type, d);
4923 return t;
4925 case CONST_VECTOR:
4927 int units = CONST_VECTOR_NUNITS (x);
4928 tree itype = TREE_TYPE (type);
4929 tree t = NULL_TREE;
4930 int i;
4933 /* Build a tree with vector elements. */
4934 for (i = units - 1; i >= 0; --i)
4936 rtx elt = CONST_VECTOR_ELT (x, i);
4937 t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
4940 return build_vector (type, t);
4943 case PLUS:
4944 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4945 make_tree (type, XEXP (x, 1)));
4947 case MINUS:
4948 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4949 make_tree (type, XEXP (x, 1)));
4951 case NEG:
4952 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
4954 case MULT:
4955 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4956 make_tree (type, XEXP (x, 1)));
4958 case ASHIFT:
4959 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4960 make_tree (type, XEXP (x, 1)));
4962 case LSHIFTRT:
4963 t = unsigned_type_for (type);
4964 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4965 make_tree (t, XEXP (x, 0)),
4966 make_tree (type, XEXP (x, 1))));
4968 case ASHIFTRT:
4969 t = signed_type_for (type);
4970 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4971 make_tree (t, XEXP (x, 0)),
4972 make_tree (type, XEXP (x, 1))));
4974 case DIV:
4975 if (TREE_CODE (type) != REAL_TYPE)
4976 t = signed_type_for (type);
4977 else
4978 t = type;
4980 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4981 make_tree (t, XEXP (x, 0)),
4982 make_tree (t, XEXP (x, 1))));
4983 case UDIV:
4984 t = unsigned_type_for (type);
4985 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4986 make_tree (t, XEXP (x, 0)),
4987 make_tree (t, XEXP (x, 1))));
4989 case SIGN_EXTEND:
4990 case ZERO_EXTEND:
4991 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
4992 GET_CODE (x) == ZERO_EXTEND);
4993 return fold_convert (type, make_tree (t, XEXP (x, 0)));
4995 case CONST:
4996 return make_tree (type, XEXP (x, 0));
4998 case SYMBOL_REF:
4999 t = SYMBOL_REF_DECL (x);
5000 if (t)
5001 return fold_convert (type, build_fold_addr_expr (t));
5002 /* else fall through. */
5004 default:
5005 t = build_decl (VAR_DECL, NULL_TREE, type);
5007 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
5008 ptr_mode. So convert. */
5009 if (POINTER_TYPE_P (type))
5010 x = convert_memory_address (TYPE_MODE (type), x);
5012 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5013 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5014 t->decl_with_rtl.rtl = x;
5016 return t;
5020 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5021 and returning TARGET.
5023 If TARGET is 0, a pseudo-register or constant is returned. */
5026 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
5028 rtx tem = 0;
5030 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5031 tem = simplify_binary_operation (AND, mode, op0, op1);
5032 if (tem == 0)
5033 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5035 if (target == 0)
5036 target = tem;
5037 else if (tem != target)
5038 emit_move_insn (target, tem);
5039 return target;
5042 /* Helper function for emit_store_flag. */
5043 static rtx
5044 emit_store_flag_1 (rtx target, rtx subtarget, enum machine_mode mode,
5045 int normalizep)
5047 rtx op0;
5048 enum machine_mode target_mode = GET_MODE (target);
5050 /* If we are converting to a wider mode, first convert to
5051 TARGET_MODE, then normalize. This produces better combining
5052 opportunities on machines that have a SIGN_EXTRACT when we are
5053 testing a single bit. This mostly benefits the 68k.
5055 If STORE_FLAG_VALUE does not have the sign bit set when
5056 interpreted in MODE, we can do this conversion as unsigned, which
5057 is usually more efficient. */
5058 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5060 convert_move (target, subtarget,
5061 (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
5062 && 0 == (STORE_FLAG_VALUE
5063 & ((HOST_WIDE_INT) 1
5064 << (GET_MODE_BITSIZE (mode) -1))));
5065 op0 = target;
5066 mode = target_mode;
5068 else
5069 op0 = subtarget;
5071 /* If we want to keep subexpressions around, don't reuse our last
5072 target. */
5073 if (optimize)
5074 subtarget = 0;
5076 /* Now normalize to the proper value in MODE. Sometimes we don't
5077 have to do anything. */
5078 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5080 /* STORE_FLAG_VALUE might be the most negative number, so write
5081 the comparison this way to avoid a compile-time warning. */
5082 else if (- normalizep == STORE_FLAG_VALUE)
5083 op0 = expand_unop (mode, neg_optab, op0, subtarget, 0);
5085 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5086 it hard to use a value of just the sign bit due to ANSI integer
5087 constant typing rules. */
5088 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5089 && (STORE_FLAG_VALUE
5090 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))))
5091 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5092 size_int (GET_MODE_BITSIZE (mode) - 1), subtarget,
5093 normalizep == 1);
5094 else
5096 gcc_assert (STORE_FLAG_VALUE & 1);
5098 op0 = expand_and (mode, op0, const1_rtx, subtarget);
5099 if (normalizep == -1)
5100 op0 = expand_unop (mode, neg_optab, op0, op0, 0);
5103 /* If we were converting to a smaller mode, do the conversion now. */
5104 if (target_mode != mode)
5106 convert_move (target, op0, 0);
5107 return target;
5109 else
5110 return op0;
5113 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5114 and storing in TARGET. Normally return TARGET.
5115 Return 0 if that cannot be done.
5117 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5118 it is VOIDmode, they cannot both be CONST_INT.
5120 UNSIGNEDP is for the case where we have to widen the operands
5121 to perform the operation. It says to use zero-extension.
5123 NORMALIZEP is 1 if we should convert the result to be either zero
5124 or one. NORMALIZEP is -1 if we should convert the result to be
5125 either zero or -1. If NORMALIZEP is zero, the result will be left
5126 "raw" out of the scc insn. */
5129 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5130 enum machine_mode mode, int unsignedp, int normalizep)
5132 rtx subtarget;
5133 enum insn_code icode;
5134 enum machine_mode compare_mode;
5135 enum machine_mode target_mode = GET_MODE (target);
5136 rtx tem;
5137 rtx last = get_last_insn ();
5138 rtx pattern, comparison;
5140 if (unsignedp)
5141 code = unsigned_condition (code);
5143 /* If one operand is constant, make it the second one. Only do this
5144 if the other operand is not constant as well. */
5146 if (swap_commutative_operands_p (op0, op1))
5148 tem = op0;
5149 op0 = op1;
5150 op1 = tem;
5151 code = swap_condition (code);
5154 if (mode == VOIDmode)
5155 mode = GET_MODE (op0);
5157 /* For some comparisons with 1 and -1, we can convert this to
5158 comparisons with zero. This will often produce more opportunities for
5159 store-flag insns. */
5161 switch (code)
5163 case LT:
5164 if (op1 == const1_rtx)
5165 op1 = const0_rtx, code = LE;
5166 break;
5167 case LE:
5168 if (op1 == constm1_rtx)
5169 op1 = const0_rtx, code = LT;
5170 break;
5171 case GE:
5172 if (op1 == const1_rtx)
5173 op1 = const0_rtx, code = GT;
5174 break;
5175 case GT:
5176 if (op1 == constm1_rtx)
5177 op1 = const0_rtx, code = GE;
5178 break;
5179 case GEU:
5180 if (op1 == const1_rtx)
5181 op1 = const0_rtx, code = NE;
5182 break;
5183 case LTU:
5184 if (op1 == const1_rtx)
5185 op1 = const0_rtx, code = EQ;
5186 break;
5187 default:
5188 break;
5191 /* If we are comparing a double-word integer with zero or -1, we can
5192 convert the comparison into one involving a single word. */
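/* E.g. on a 32-bit target, a DImode x == 0 becomes (low | high) == 0
   and x == -1 becomes (low & high) == -1, while x < 0 can be tested
   on the high word alone.  */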
5193 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5194 && GET_MODE_CLASS (mode) == MODE_INT
5195 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5197 if ((code == EQ || code == NE)
5198 && (op1 == const0_rtx || op1 == constm1_rtx))
5200 rtx op00, op01, op0both;
5202 /* Do a logical OR or AND of the two words and compare the
5203 result. */
5204 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5205 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5206 op0both = expand_binop (word_mode,
5207 op1 == const0_rtx ? ior_optab : and_optab,
5208 op00, op01, NULL_RTX, unsignedp,
5209 OPTAB_DIRECT);
5211 if (op0both != 0)
5212 return emit_store_flag (target, code, op0both, op1, word_mode,
5213 unsignedp, normalizep);
5215 else if ((code == LT || code == GE) && op1 == const0_rtx)
5217 rtx op0h;
5219 /* If testing the sign bit, can just test on high word. */
5220 op0h = simplify_gen_subreg (word_mode, op0, mode,
5221 subreg_highpart_offset (word_mode,
5222 mode));
5223 return emit_store_flag (target, code, op0h, op1, word_mode,
5224 unsignedp, normalizep);
5228 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5229 complement of A (for GE) and shifting the sign bit to the low bit. */
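/* E.g. in a 32-bit mode, (A < 0) is the logical shift A >> 31 and
   (A >= 0) is ~A >> 31, when a 0/1 result is wanted.  */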
5230 if (op1 == const0_rtx && (code == LT || code == GE)
5231 && GET_MODE_CLASS (mode) == MODE_INT
5232 && (normalizep || STORE_FLAG_VALUE == 1
5233 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5234 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5235 == ((unsigned HOST_WIDE_INT) 1
5236 << (GET_MODE_BITSIZE (mode) - 1))))))
5238 subtarget = target;
5240 /* If the result is to be wider than OP0, it is best to convert it
5241 first. If it is to be narrower, it is *incorrect* to convert it
5242 first. */
5243 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5245 op0 = convert_modes (target_mode, mode, op0, 0);
5246 mode = target_mode;
5249 if (target_mode != mode)
5250 subtarget = 0;
5252 if (code == GE)
5253 op0 = expand_unop (mode, one_cmpl_optab, op0,
5254 ((STORE_FLAG_VALUE == 1 || normalizep)
5255 ? 0 : subtarget), 0);
5257 if (STORE_FLAG_VALUE == 1 || normalizep)
5258 /* If we are supposed to produce a 0/1 value, we want to do
5259 a logical shift from the sign bit to the low-order bit; for
5260 a -1/0 value, we do an arithmetic shift. */
5261 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5262 size_int (GET_MODE_BITSIZE (mode) - 1),
5263 subtarget, normalizep != -1);
5265 if (mode != target_mode)
5266 op0 = convert_modes (target_mode, mode, op0, 0);
5268 return op0;
5271 icode = setcc_gen_code[(int) code];
5273 if (icode != CODE_FOR_nothing)
5275 insn_operand_predicate_fn pred;
5277 /* We think we may be able to do this with a scc insn. Emit the
5278 comparison and then the scc insn. */
5280 do_pending_stack_adjust ();
5281 last = get_last_insn ();
5283 comparison
5284 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
5285 if (CONSTANT_P (comparison))
5287 switch (GET_CODE (comparison))
5289 case CONST_INT:
5290 if (comparison == const0_rtx)
5291 return const0_rtx;
5292 break;
5294 #ifdef FLOAT_STORE_FLAG_VALUE
5295 case CONST_DOUBLE:
5296 if (comparison == CONST0_RTX (GET_MODE (comparison)))
5297 return const0_rtx;
5298 break;
5299 #endif
5300 default:
5301 gcc_unreachable ();
5304 if (normalizep == 1)
5305 return const1_rtx;
5306 if (normalizep == -1)
5307 return constm1_rtx;
5308 return const_true_rtx;
5311 /* The code of COMPARISON may not match CODE if compare_from_rtx
5312 decided to swap its operands and reverse the original code.
5314 We know that compare_from_rtx returns either a CONST_INT or
5315 a new comparison code, so it is safe to just extract the
5316 code from COMPARISON. */
5317 code = GET_CODE (comparison);
5319 /* Get a reference to the target in the proper mode for this insn. */
5320 compare_mode = insn_data[(int) icode].operand[0].mode;
5321 subtarget = target;
5322 pred = insn_data[(int) icode].operand[0].predicate;
5323 if (optimize || ! (*pred) (subtarget, compare_mode))
5324 subtarget = gen_reg_rtx (compare_mode);
5326 pattern = GEN_FCN (icode) (subtarget);
5327 if (pattern)
5329 emit_insn (pattern);
5330 return emit_store_flag_1 (target, subtarget, compare_mode,
5331 normalizep);
5334 else
5336 /* We don't have an scc insn, so try a cstore insn. */
5338 for (compare_mode = mode; compare_mode != VOIDmode;
5339 compare_mode = GET_MODE_WIDER_MODE (compare_mode))
5341 icode = optab_handler (cstore_optab, compare_mode)->insn_code;
5342 if (icode != CODE_FOR_nothing)
5343 break;
5346 if (icode != CODE_FOR_nothing)
5348 enum machine_mode result_mode
5349 = insn_data[(int) icode].operand[0].mode;
5350 rtx cstore_op0 = op0;
5351 rtx cstore_op1 = op1;
5353 do_pending_stack_adjust ();
5354 last = get_last_insn ();
5356 if (compare_mode != mode)
5358 cstore_op0 = convert_modes (compare_mode, mode, cstore_op0,
5359 unsignedp);
5360 cstore_op1 = convert_modes (compare_mode, mode, cstore_op1,
5361 unsignedp);
5364 if (!insn_data[(int) icode].operand[2].predicate (cstore_op0,
5365 compare_mode))
5366 cstore_op0 = copy_to_mode_reg (compare_mode, cstore_op0);
5368 if (!insn_data[(int) icode].operand[3].predicate (cstore_op1,
5369 compare_mode))
5370 cstore_op1 = copy_to_mode_reg (compare_mode, cstore_op1);
5372 comparison = gen_rtx_fmt_ee (code, result_mode, cstore_op0,
5373 cstore_op1);
5374 subtarget = target;
5376 if (optimize || !(insn_data[(int) icode].operand[0].predicate
5377 (subtarget, result_mode)))
5378 subtarget = gen_reg_rtx (result_mode);
5380 pattern = GEN_FCN (icode) (subtarget, comparison, cstore_op0,
5381 cstore_op1);
5383 if (pattern)
5385 emit_insn (pattern);
5386 return emit_store_flag_1 (target, subtarget, result_mode,
5387 normalizep);
5392 delete_insns_since (last);
5394 /* If optimizing, use different pseudo registers for each insn, instead
5395 of reusing the same pseudo. This leads to better CSE, but slows
5396 down the compiler, since there are more pseudos. */
5397 subtarget = (!optimize
5398 && (target_mode == mode)) ? target : NULL_RTX;
5400 /* If we reached here, we can't do this with a scc insn. However, there
5401 are some comparisons that can be done directly. For example, if
5402 this is an equality comparison of integers, we can try to exclusive-or
5403 (or subtract) the two operands and use a recursive call to try the
5404 comparison with zero. Don't do any of these cases if branches are
5405 very cheap. */
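/* For illustration: A == B exactly when (A ^ B) == 0, and likewise
   exactly when (A - B) == 0 (the subtraction may wrap, but it wraps to
   zero only when the operands are equal), so either operation lets the
   recursive call compare against const0_rtx instead.  */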
5407 if (BRANCH_COST (optimize_insn_for_speed_p (),
5408 false) > 0
5409 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
5410 && op1 != const0_rtx)
5412 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5413 OPTAB_WIDEN);
5415 if (tem == 0)
5416 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5417 OPTAB_WIDEN);
5418 if (tem != 0)
5419 tem = emit_store_flag (target, code, tem, const0_rtx,
5420 mode, unsignedp, normalizep);
5421 if (tem == 0)
5422 delete_insns_since (last);
5423 return tem;
5426 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5427 the constant zero. Reject all other comparisons at this point. Only
5428 do LE and GT if branches are expensive, since those comparisons are
5429 themselves expensive on 2-operand machines. */
5431 if (BRANCH_COST (optimize_insn_for_speed_p (),
5432 false) == 0
5433 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
5434 || (code != EQ && code != NE
5435 && (BRANCH_COST (optimize_insn_for_speed_p (),
5436 false) <= 1 || (code != LE && code != GT))))
5437 return 0;
5439 /* See what we need to return. We can only return a 1, -1, or the
5440 sign bit. */
5442 if (normalizep == 0)
5444 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5445 normalizep = STORE_FLAG_VALUE;
5447 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5448 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5449 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5451 else
5452 return 0;
5455 /* Try to put the result of the comparison in the sign bit. Assume we can't
5456 do the necessary operation below. */
5458 tem = 0;
5460 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5461 the sign bit set. */
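/* Worked example in 8 bits: A = 0 gives 0x00 | 0xff = 0xff (sign bit
   set); A = -3 gives 0xfd | 0xfc = 0xfd (sign bit set); A = 5 gives
   0x05 | 0x04 = 0x05 (sign bit clear).  The only boundary case is
   A == 0, where the borrow from A - 1 supplies the sign bit.  */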
5463 if (code == LE)
5465 /* This is destructive, so SUBTARGET can't be OP0. */
5466 if (rtx_equal_p (subtarget, op0))
5467 subtarget = 0;
5469 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5470 OPTAB_WIDEN);
5471 if (tem)
5472 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5473 OPTAB_WIDEN);
5476 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5477 number of bits in the mode of OP0, minus one. */
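/* Worked example in 8 bits: the arithmetic shift yields 0 for A >= 0
   and -1 for A < 0, so the difference is -A for A >= 0 and -1 - A for
   A < 0.  Its sign bit is set exactly when A > 0: A = 5 gives
   0 - 5 = -5 (set), A = 0 gives 0 (clear), A = -3 gives -1 + 3 = 2
   (clear).  */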
5479 if (code == GT)
5481 if (rtx_equal_p (subtarget, op0))
5482 subtarget = 0;
5484 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5485 size_int (GET_MODE_BITSIZE (mode) - 1),
5486 subtarget, 0);
5487 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5488 OPTAB_WIDEN);
5491 if (code == EQ || code == NE)
5493 /* For EQ or NE, one way to do the comparison is to apply an operation
5494 that converts the operand into a positive number if it is nonzero
5495 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5496 for NE we negate. This puts the result in the sign bit. Then we
5497 normalize with a shift, if needed.
5499 Two operations that can do the above actions are ABS and FFS, so try
5500 them. If that doesn't work, and MODE is smaller than a full word,
5501 we can use zero-extension to the wider mode (an unsigned conversion)
5502 as the operation. */
5504 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5505 that is compensated by the subsequent overflow when subtracting
5506 one / negating. */
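/* Illustrative sketch of the ABS variant: for nonzero A (other than
   INT_MIN), abs (A) is positive, so abs (A) - 1 is nonnegative (sign
   clear, EQ false) and -abs (A) is negative (sign set, NE true); for
   A == 0 the results are -1 (sign set, EQ true) and 0 (sign clear,
   NE false).  Even for A == INT_MIN, where abs (A) stays negative, the
   subsequent subtract-one / negation overflows back to a value with
   the correct sign.  */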
5508 if (optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)
5509 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5510 else if (optab_handler (ffs_optab, mode)->insn_code != CODE_FOR_nothing)
5511 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5512 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5514 tem = convert_modes (word_mode, mode, op0, 1);
5515 mode = word_mode;
5518 if (tem != 0)
5520 if (code == EQ)
5521 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5522 0, OPTAB_WIDEN);
5523 else
5524 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5527 /* If we couldn't do it that way, for NE we can "or" the two's complement
5528 of the value with itself. For EQ, we take the one's complement of
5529 that "or", which is an extra insn, so we only handle EQ if branches
5530 are expensive. */
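/* Worked example in 8 bits: A = 3 gives (-3) | 3 = 0xfd | 0x03 = 0xff
   (sign set, so NE holds); A = 0 gives 0 | 0 = 0 (sign clear).  For EQ
   the extra one's complement flips the sign bit along with the rest,
   at the cost of one more insn.  */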
5532 if (tem == 0
5533 && (code == NE
5534 || BRANCH_COST (optimize_insn_for_speed_p (),
5535 false) > 1))
5537 if (rtx_equal_p (subtarget, op0))
5538 subtarget = 0;
5540 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5541 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5542 OPTAB_WIDEN);
5544 if (tem && code == EQ)
5545 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5549 if (tem && normalizep)
5550 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5551 size_int (GET_MODE_BITSIZE (mode) - 1),
5552 subtarget, normalizep == 1);
5554 if (tem)
5556 if (GET_MODE (tem) != target_mode)
5558 convert_move (target, tem, 0);
5559 tem = target;
5561 else if (!subtarget)
5563 emit_move_insn (target, tem);
5564 tem = target;
5567 else
5568 delete_insns_since (last);
5570 return tem;
5573 /* Like emit_store_flag, but always succeeds. */
rtx
5576 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5577 enum machine_mode mode, int unsignedp, int normalizep)
5579 rtx tem, label;
5581 /* First see if emit_store_flag can do the job. */
5582 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5583 if (tem != 0)
5584 return tem;
5586 if (normalizep == 0)
5587 normalizep = 1;
5589 /* If this failed, we have to do this with set/compare/jump/set code. */
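/* In C-like pseudocode, the sequence built below behaves as

       target = 1;
       if (op0 CODE op1) goto label;
       target = 0;
     label: ;

   with the test and jump emitted by do_compare_rtx_and_jump.  */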
5591 if (!REG_P (target)
5592 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5593 target = gen_reg_rtx (GET_MODE (target));
5595 emit_move_insn (target, const1_rtx);
5596 label = gen_label_rtx ();
5597 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5598 NULL_RTX, label);
5600 emit_move_insn (target, const0_rtx);
5601 emit_label (label);
5603 return target;
5606 /* Perform possibly multi-word comparison and conditional jump to LABEL
5607 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE. This is
5608 now a thin wrapper around do_compare_rtx_and_jump. */
5610 static void
5611 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5612 rtx label)
5614 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
5615 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
5616 NULL_RTX, NULL_RTX, label);