1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "toplev.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "tm_p.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "expr.h"
35 #include "optabs.h"
36 #include "real.h"
37 #include "recog.h"
38 #include "langhooks.h"
40 static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
41 unsigned HOST_WIDE_INT,
42 unsigned HOST_WIDE_INT, rtx);
43 static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
44 unsigned HOST_WIDE_INT, rtx);
45 static rtx extract_fixed_bit_field (enum machine_mode, rtx,
46 unsigned HOST_WIDE_INT,
47 unsigned HOST_WIDE_INT,
48 unsigned HOST_WIDE_INT, rtx, int);
49 static rtx mask_rtx (enum machine_mode, int, int, int);
50 static rtx lshift_value (enum machine_mode, rtx, int, int);
51 static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
52 unsigned HOST_WIDE_INT, int);
53 static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
54 static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
55 static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
57 /* Test whether a value is zero or a power of two.  */
58 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
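/* A minimal worked example of the test above: x & (x - 1) clears the lowest
   set bit of x, so the result is zero exactly when x has at most one bit set.
   The values here are illustrative only:

     EXACT_POWER_OF_2_OR_ZERO_P (0)   =>  (0 & -1) == 0    =>  true
     EXACT_POWER_OF_2_OR_ZERO_P (8)   =>  (8 & 7) == 0     =>  true
     EXACT_POWER_OF_2_OR_ZERO_P (12)  =>  (12 & 11) == 8   =>  false  */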
60 /* Nonzero means divides or modulus operations are relatively cheap for
61 powers of two, so don't use branches; emit the operation instead.
62 Usually, this will mean that the MD file will emit non-branch
63 sequences. */
65 static bool sdiv_pow2_cheap[NUM_MACHINE_MODES];
66 static bool smod_pow2_cheap[NUM_MACHINE_MODES];
68 #ifndef SLOW_UNALIGNED_ACCESS
69 #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
70 #endif
72 /* For compilers that support multiple targets with different word sizes,
73 MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD. An example
74 is the H8/300(H) compiler. */
76 #ifndef MAX_BITS_PER_WORD
77 #define MAX_BITS_PER_WORD BITS_PER_WORD
78 #endif
80 /* Reduce conditional compilation elsewhere. */
81 #ifndef HAVE_insv
82 #define HAVE_insv 0
83 #define CODE_FOR_insv CODE_FOR_nothing
84 #define gen_insv(a,b,c,d) NULL_RTX
85 #endif
86 #ifndef HAVE_extv
87 #define HAVE_extv 0
88 #define CODE_FOR_extv CODE_FOR_nothing
89 #define gen_extv(a,b,c,d) NULL_RTX
90 #endif
91 #ifndef HAVE_extzv
92 #define HAVE_extzv 0
93 #define CODE_FOR_extzv CODE_FOR_nothing
94 #define gen_extzv(a,b,c,d) NULL_RTX
95 #endif
97 /* Cost of various pieces of RTL. Note that some of these are indexed by
98 shift count and some by mode. */
99 static int zero_cost;
100 static int add_cost[NUM_MACHINE_MODES];
101 static int neg_cost[NUM_MACHINE_MODES];
102 static int shift_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
103 static int shiftadd_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
104 static int shiftsub_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
105 static int mul_cost[NUM_MACHINE_MODES];
106 static int sdiv_cost[NUM_MACHINE_MODES];
107 static int udiv_cost[NUM_MACHINE_MODES];
108 static int mul_widen_cost[NUM_MACHINE_MODES];
109 static int mul_highpart_cost[NUM_MACHINE_MODES];
111 void
112 init_expmed (void)
114 struct
116 struct rtx_def reg; rtunion reg_fld[2];
117 struct rtx_def plus; rtunion plus_fld1;
118 struct rtx_def neg;
119 struct rtx_def mult; rtunion mult_fld1;
120 struct rtx_def sdiv; rtunion sdiv_fld1;
121 struct rtx_def udiv; rtunion udiv_fld1;
122 struct rtx_def zext;
123 struct rtx_def sdiv_32; rtunion sdiv_32_fld1;
124 struct rtx_def smod_32; rtunion smod_32_fld1;
125 struct rtx_def wide_mult; rtunion wide_mult_fld1;
126 struct rtx_def wide_lshr; rtunion wide_lshr_fld1;
127 struct rtx_def wide_trunc;
128 struct rtx_def shift; rtunion shift_fld1;
129 struct rtx_def shift_mult; rtunion shift_mult_fld1;
130 struct rtx_def shift_add; rtunion shift_add_fld1;
131 struct rtx_def shift_sub; rtunion shift_sub_fld1;
132 } all;
134 rtx pow2[MAX_BITS_PER_WORD];
135 rtx cint[MAX_BITS_PER_WORD];
136 int m, n;
137 enum machine_mode mode, wider_mode;
139 zero_cost = rtx_cost (const0_rtx, 0);
141 for (m = 1; m < MAX_BITS_PER_WORD; m++)
143 pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
144 cint[m] = GEN_INT (m);
147 memset (&all, 0, sizeof all);
149 PUT_CODE (&all.reg, REG);
150 /* Avoid using hard regs in ways which may be unsupported. */
151 REGNO (&all.reg) = LAST_VIRTUAL_REGISTER + 1;
153 PUT_CODE (&all.plus, PLUS);
154 XEXP (&all.plus, 0) = &all.reg;
155 XEXP (&all.plus, 1) = &all.reg;
157 PUT_CODE (&all.neg, NEG);
158 XEXP (&all.neg, 0) = &all.reg;
160 PUT_CODE (&all.mult, MULT);
161 XEXP (&all.mult, 0) = &all.reg;
162 XEXP (&all.mult, 1) = &all.reg;
164 PUT_CODE (&all.sdiv, DIV);
165 XEXP (&all.sdiv, 0) = &all.reg;
166 XEXP (&all.sdiv, 1) = &all.reg;
168 PUT_CODE (&all.udiv, UDIV);
169 XEXP (&all.udiv, 0) = &all.reg;
170 XEXP (&all.udiv, 1) = &all.reg;
172 PUT_CODE (&all.sdiv_32, DIV);
173 XEXP (&all.sdiv_32, 0) = &all.reg;
174 XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);
176 PUT_CODE (&all.smod_32, MOD);
177 XEXP (&all.smod_32, 0) = &all.reg;
178 XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);
180 PUT_CODE (&all.zext, ZERO_EXTEND);
181 XEXP (&all.zext, 0) = &all.reg;
183 PUT_CODE (&all.wide_mult, MULT);
184 XEXP (&all.wide_mult, 0) = &all.zext;
185 XEXP (&all.wide_mult, 1) = &all.zext;
187 PUT_CODE (&all.wide_lshr, LSHIFTRT);
188 XEXP (&all.wide_lshr, 0) = &all.wide_mult;
190 PUT_CODE (&all.wide_trunc, TRUNCATE);
191 XEXP (&all.wide_trunc, 0) = &all.wide_lshr;
193 PUT_CODE (&all.shift, ASHIFT);
194 XEXP (&all.shift, 0) = &all.reg;
196 PUT_CODE (&all.shift_mult, MULT);
197 XEXP (&all.shift_mult, 0) = &all.reg;
199 PUT_CODE (&all.shift_add, PLUS);
200 XEXP (&all.shift_add, 0) = &all.shift_mult;
201 XEXP (&all.shift_add, 1) = &all.reg;
203 PUT_CODE (&all.shift_sub, MINUS);
204 XEXP (&all.shift_sub, 0) = &all.shift_mult;
205 XEXP (&all.shift_sub, 1) = &all.reg;
207 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
208 mode != VOIDmode;
209 mode = GET_MODE_WIDER_MODE (mode))
211 PUT_MODE (&all.reg, mode);
212 PUT_MODE (&all.plus, mode);
213 PUT_MODE (&all.neg, mode);
214 PUT_MODE (&all.mult, mode);
215 PUT_MODE (&all.sdiv, mode);
216 PUT_MODE (&all.udiv, mode);
217 PUT_MODE (&all.sdiv_32, mode);
218 PUT_MODE (&all.smod_32, mode);
219 PUT_MODE (&all.wide_trunc, mode);
220 PUT_MODE (&all.shift, mode);
221 PUT_MODE (&all.shift_mult, mode);
222 PUT_MODE (&all.shift_add, mode);
223 PUT_MODE (&all.shift_sub, mode);
225 add_cost[mode] = rtx_cost (&all.plus, SET);
226 neg_cost[mode] = rtx_cost (&all.neg, SET);
227 mul_cost[mode] = rtx_cost (&all.mult, SET);
228 sdiv_cost[mode] = rtx_cost (&all.sdiv, SET);
229 udiv_cost[mode] = rtx_cost (&all.udiv, SET);
231 sdiv_pow2_cheap[mode] = (rtx_cost (&all.sdiv_32, SET)
232 <= 2 * add_cost[mode]);
233 smod_pow2_cheap[mode] = (rtx_cost (&all.smod_32, SET)
234 <= 4 * add_cost[mode]);
236 wider_mode = GET_MODE_WIDER_MODE (mode);
237 if (wider_mode != VOIDmode)
239 PUT_MODE (&all.zext, wider_mode);
240 PUT_MODE (&all.wide_mult, wider_mode);
241 PUT_MODE (&all.wide_lshr, wider_mode);
242 XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));
244 mul_widen_cost[wider_mode] = rtx_cost (&all.wide_mult, SET);
245 mul_highpart_cost[mode] = rtx_cost (&all.wide_trunc, SET);
248 shift_cost[mode][0] = 0;
249 shiftadd_cost[mode][0] = shiftsub_cost[mode][0] = add_cost[mode];
251 n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
252 for (m = 1; m < n; m++)
254 XEXP (&all.shift, 1) = cint[m];
255 XEXP (&all.shift_mult, 1) = pow2[m];
257 shift_cost[mode][m] = rtx_cost (&all.shift, SET);
258 shiftadd_cost[mode][m] = rtx_cost (&all.shift_add, SET);
259 shiftsub_cost[mode][m] = rtx_cost (&all.shift_sub, SET);
264 /* Return an rtx representing minus the value of X.
265 MODE is the intended mode of the result,
266 useful if X is a CONST_INT. */
269 negate_rtx (enum machine_mode mode, rtx x)
271 rtx result = simplify_unary_operation (NEG, mode, x, mode);
273 if (result == 0)
274 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
276 return result;
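/* A minimal usage sketch for negate_rtx, assuming SImode and a constant
   operand (a hypothetical caller, shown only to illustrate the interface):

     rtx five     = GEN_INT (5);
     rtx neg_five = negate_rtx (SImode, five);

   Because the operand is a CONST_INT, simplify_unary_operation folds the
   negation at compile time and no insns are emitted; for a pseudo register
   it would fall through to expand_unop instead.  */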
279 /* Report on the availability of insv/extv/extzv and the desired mode
280 of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo
281 is false; else the mode of the specified operand. If OPNO is -1,
282 all the caller cares about is whether the insn is available. */
283 enum machine_mode
284 mode_for_extraction (enum extraction_pattern pattern, int opno)
286 const struct insn_data *data;
288 switch (pattern)
290 case EP_insv:
291 if (HAVE_insv)
293 data = &insn_data[CODE_FOR_insv];
294 break;
296 return MAX_MACHINE_MODE;
298 case EP_extv:
299 if (HAVE_extv)
301 data = &insn_data[CODE_FOR_extv];
302 break;
304 return MAX_MACHINE_MODE;
306 case EP_extzv:
307 if (HAVE_extzv)
309 data = &insn_data[CODE_FOR_extzv];
310 break;
312 return MAX_MACHINE_MODE;
314 default:
315 gcc_unreachable ();
318 if (opno == -1)
319 return VOIDmode;
321 /* Everyone who uses this function used to follow it with
322 if (result == VOIDmode) result = word_mode; */
323 if (data->operand[opno].mode == VOIDmode)
324 return word_mode;
325 return data->operand[opno].mode;
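/* A usage sketch for mode_for_extraction, assuming a target that provides
   an insv pattern (the resulting mode is target-dependent):

     enum machine_mode field_mode = mode_for_extraction (EP_insv, 3);

   If HAVE_insv is 0 this returns MAX_MACHINE_MODE; if operand 3 of the insv
   pattern has no mode it returns word_mode; otherwise it returns whatever
   mode the pattern declares for that operand (often SImode).  */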
329 /* Generate code to store value from rtx VALUE
330 into a bit-field within structure STR_RTX
331 containing BITSIZE bits starting at bit BITNUM.
332 FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
333 ALIGN is the alignment that STR_RTX is known to have.
334 TOTAL_SIZE is the size of the structure in bytes, or -1 if varying. */
336 /* ??? Note that there are two different ideas here for how
337 to determine the size to count bits within, for a register.
338 One is BITS_PER_WORD, and the other is the size of operand 3
339 of the insv pattern.
341 If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD;
342 else, we use the mode of operand 3. */
345 store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
346 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
347 rtx value)
349 unsigned int unit
350 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
351 unsigned HOST_WIDE_INT offset, bitpos;
352 rtx op0 = str_rtx;
353 int byte_offset;
354 rtx orig_value;
356 enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
358 while (GET_CODE (op0) == SUBREG)
360 /* The following line once was done only if WORDS_BIG_ENDIAN,
361 but I think that is a mistake. WORDS_BIG_ENDIAN is
362 meaningful at a much higher level; when structures are copied
363 between memory and regs, the higher-numbered regs
364 always get higher addresses. */
365 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
366 op0 = SUBREG_REG (op0);
369 /* No action is needed if the target is a register and if the field
370 lies completely outside that register. This can occur if the source
371 code contains an out-of-bounds access to a small array. */
372 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
373 return value;
375 /* Use vec_set patterns for inserting parts of vectors whenever
376 available. */
377 if (VECTOR_MODE_P (GET_MODE (op0))
378 && !MEM_P (op0)
379 && (vec_set_optab->handlers[GET_MODE (op0)].insn_code
380 != CODE_FOR_nothing)
381 && fieldmode == GET_MODE_INNER (GET_MODE (op0))
382 && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
383 && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
385 enum machine_mode outermode = GET_MODE (op0);
386 enum machine_mode innermode = GET_MODE_INNER (outermode);
387 int icode = (int) vec_set_optab->handlers[outermode].insn_code;
388 int pos = bitnum / GET_MODE_BITSIZE (innermode);
389 rtx rtxpos = GEN_INT (pos);
390 rtx src = value;
391 rtx dest = op0;
392 rtx pat, seq;
393 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
394 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
395 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
397 start_sequence ();
399 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
400 src = copy_to_mode_reg (mode1, src);
402 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
403 rtxpos = copy_to_mode_reg (mode2, rtxpos);
405 /* We could handle this, but we should always be called with a pseudo
406 for our targets and all insns should take them as outputs. */
407 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
408 && (*insn_data[icode].operand[1].predicate) (src, mode1)
409 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
410 pat = GEN_FCN (icode) (dest, src, rtxpos);
411 seq = get_insns ();
412 end_sequence ();
413 if (pat)
415 emit_insn (seq);
416 emit_insn (pat);
417 return dest;
421 /* If the target is a register, overwriting the entire object, or storing
422 a full-word or multi-word field can be done with just a SUBREG.
424 If the target is memory, storing any naturally aligned field can be
425 done with a simple store. For targets that support fast unaligned
426 memory, any naturally sized, unit aligned field can be done directly. */
428 offset = bitnum / unit;
429 bitpos = bitnum % unit;
430 byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
431 + (offset * UNITS_PER_WORD);
433 if (bitpos == 0
434 && bitsize == GET_MODE_BITSIZE (fieldmode)
435 && (!MEM_P (op0)
436 ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
437 || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
438 && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
439 : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
440 || (offset * BITS_PER_UNIT % bitsize == 0
441 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
443 if (MEM_P (op0))
444 op0 = adjust_address (op0, fieldmode, offset);
445 else if (GET_MODE (op0) != fieldmode)
446 op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
447 byte_offset);
448 emit_move_insn (op0, value);
449 return value;
452 /* Make sure we are playing with integral modes. Pun with subregs
453 if we aren't. This must come after the entire register case above,
454 since that case is valid for any mode. The following cases are only
455 valid for integral modes. */
457 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
458 if (imode != GET_MODE (op0))
460 if (MEM_P (op0))
461 op0 = adjust_address (op0, imode, 0);
462 else
464 gcc_assert (imode != BLKmode);
465 op0 = gen_lowpart (imode, op0);
470 /* We may be accessing data outside the field, which means
471 we can alias adjacent data. */
472 if (MEM_P (op0))
474 op0 = shallow_copy_rtx (op0);
475 set_mem_alias_set (op0, 0);
476 set_mem_expr (op0, 0);
479 /* If OP0 is a register, BITPOS must count within a word.
480 But as we have it, it counts within whatever size OP0 now has.
481 On a bigendian machine, these are not the same, so convert. */
482 if (BYTES_BIG_ENDIAN
483 && !MEM_P (op0)
484 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
485 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
487 /* Storing an lsb-aligned field in a register
488 can be done with a movestrict instruction. */
490 if (!MEM_P (op0)
491 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
492 && bitsize == GET_MODE_BITSIZE (fieldmode)
493 && (movstrict_optab->handlers[fieldmode].insn_code
494 != CODE_FOR_nothing))
496 int icode = movstrict_optab->handlers[fieldmode].insn_code;
498 /* Get appropriate low part of the value being stored. */
499 if (GET_CODE (value) == CONST_INT || REG_P (value))
500 value = gen_lowpart (fieldmode, value);
501 else if (!(GET_CODE (value) == SYMBOL_REF
502 || GET_CODE (value) == LABEL_REF
503 || GET_CODE (value) == CONST))
504 value = convert_to_mode (fieldmode, value, 0);
506 if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
507 value = copy_to_mode_reg (fieldmode, value);
509 if (GET_CODE (op0) == SUBREG)
511 /* Else we've got some float mode source being extracted into
512 a different float mode destination -- this combination of
513 subregs results in Severe Tire Damage. */
514 gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
515 || GET_MODE_CLASS (fieldmode) == MODE_INT
516 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
517 op0 = SUBREG_REG (op0);
520 emit_insn (GEN_FCN (icode)
521 (gen_rtx_SUBREG (fieldmode, op0,
522 (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
523 + (offset * UNITS_PER_WORD)),
524 value));
526 return value;
529 /* Handle fields bigger than a word. */
531 if (bitsize > BITS_PER_WORD)
533 /* Here we transfer the words of the field
534 in the order least significant first.
535 This is because the most significant word is the one which may
536 be less than full.
537 However, only do that if the value is not BLKmode. */
539 unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
540 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
541 unsigned int i;
543 /* This is the mode we must force value to, so that there will be enough
544 subwords to extract. Note that fieldmode will often (always?) be
545 VOIDmode, because that is what store_field uses to indicate that this
546 is a bit field, but passing VOIDmode to operand_subword_force
547 is not allowed. */
548 fieldmode = GET_MODE (value);
549 if (fieldmode == VOIDmode)
550 fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
552 for (i = 0; i < nwords; i++)
554 /* If I is 0, use the low-order word in both field and target;
555 if I is 1, use the next to lowest word; and so on. */
556 unsigned int wordnum = (backwards ? nwords - i - 1 : i);
557 unsigned int bit_offset = (backwards
558 ? MAX ((int) bitsize - ((int) i + 1)
559 * BITS_PER_WORD,
560 0)
561 : (int) i * BITS_PER_WORD);
563 store_bit_field (op0, MIN (BITS_PER_WORD,
564 bitsize - i * BITS_PER_WORD),
565 bitnum + bit_offset, word_mode,
566 operand_subword_force (value, wordnum, fieldmode));
568 return value;
571 /* From here on we can assume that the field to be stored in fits within
572 a single word (whatever mode that is), since it is narrower than a word. */
574 /* OFFSET is the number of words or bytes (UNIT says which)
575 from STR_RTX to the first word or byte containing part of the field. */
577 if (!MEM_P (op0))
579 if (offset != 0
580 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
582 if (!REG_P (op0))
584 /* Since this is a destination (lvalue), we can't copy
585 it to a pseudo. We can remove a SUBREG that does not
586 change the size of the operand. Such a SUBREG may
587 have been added above. */
588 gcc_assert (GET_CODE (op0) == SUBREG
589 && (GET_MODE_SIZE (GET_MODE (op0))
590 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
591 op0 = SUBREG_REG (op0);
593 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
594 op0, (offset * UNITS_PER_WORD));
596 offset = 0;
599 /* If VALUE has a floating-point or complex mode, access it as an
600 integer of the corresponding size. This can occur on a machine
601 with 64 bit registers that uses SFmode for float. It can also
602 occur for unaligned float or complex fields. */
603 orig_value = value;
604 if (GET_MODE (value) != VOIDmode
605 && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
606 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
608 value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
609 emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
612 /* Now OFFSET is nonzero only if OP0 is memory
613 and is therefore always measured in bytes. */
615 if (HAVE_insv
616 && GET_MODE (value) != BLKmode
617 && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
618 && bitsize > 0
619 && GET_MODE_BITSIZE (op_mode) >= bitsize
620 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
621 && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
623 int xbitpos = bitpos;
624 rtx value1;
625 rtx xop0 = op0;
626 rtx last = get_last_insn ();
627 rtx pat;
628 enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
629 int save_volatile_ok = volatile_ok;
631 volatile_ok = 1;
633 /* If this machine's insv can only insert into a register, copy OP0
634 into a register and save it back later. */
635 if (MEM_P (op0)
636 && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
637 (op0, VOIDmode)))
639 rtx tempreg;
640 enum machine_mode bestmode;
642 /* Get the mode to use for inserting into this field. If OP0 is
643 BLKmode, get the smallest mode consistent with the alignment. If
644 OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
645 mode. Otherwise, use the smallest mode containing the field. */
647 if (GET_MODE (op0) == BLKmode
648 || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
649 bestmode
650 = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
651 MEM_VOLATILE_P (op0));
652 else
653 bestmode = GET_MODE (op0);
655 if (bestmode == VOIDmode
656 || GET_MODE_SIZE (bestmode) < GET_MODE_SIZE (fieldmode)
657 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
658 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
659 goto insv_loses;
661 /* Adjust address to point to the containing unit of that mode.
662 Compute offset as multiple of this unit, counting in bytes. */
663 unit = GET_MODE_BITSIZE (bestmode);
664 offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
665 bitpos = bitnum % unit;
666 op0 = adjust_address (op0, bestmode, offset);
668 /* Fetch that unit, store the bitfield in it, then store
669 the unit. */
670 tempreg = copy_to_reg (op0);
671 store_bit_field (tempreg, bitsize, bitpos, fieldmode, orig_value);
672 emit_move_insn (op0, tempreg);
673 return value;
675 volatile_ok = save_volatile_ok;
677 /* Add OFFSET into OP0's address. */
678 if (MEM_P (xop0))
679 xop0 = adjust_address (xop0, byte_mode, offset);
681 /* If xop0 is a register, we need it in MAXMODE
682 to make it acceptable to the format of insv. */
683 if (GET_CODE (xop0) == SUBREG)
684 /* We can't just change the mode, because this might clobber op0,
685 and we will need the original value of op0 if insv fails. */
686 xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
687 if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
688 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
690 /* On big-endian machines, we count bits from the most significant.
691 If the bit field insn does not, we must invert. */
693 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
694 xbitpos = unit - bitsize - xbitpos;
696 /* We have been counting XBITPOS within UNIT.
697 Count instead within the size of the register. */
698 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
699 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
701 unit = GET_MODE_BITSIZE (maxmode);
703 /* Convert VALUE to maxmode (which insv insn wants) in VALUE1. */
704 value1 = value;
705 if (GET_MODE (value) != maxmode)
707 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
709 /* Optimization: Don't bother really extending VALUE
710 if it has all the bits we will actually use. However,
711 if we must narrow it, be sure we do it correctly. */
713 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
715 rtx tmp;
717 tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
718 if (! tmp)
719 tmp = simplify_gen_subreg (maxmode,
720 force_reg (GET_MODE (value),
721 value1),
722 GET_MODE (value), 0);
723 value1 = tmp;
725 else
726 value1 = gen_lowpart (maxmode, value1);
728 else if (GET_CODE (value) == CONST_INT)
729 value1 = gen_int_mode (INTVAL (value), maxmode);
730 else
731 /* Parse phase is supposed to make VALUE's data type
732 match that of the component reference, which is a type
733 at least as wide as the field; so VALUE should have
734 a mode that corresponds to that type. */
735 gcc_assert (CONSTANT_P (value));
738 /* If this machine's insv insists on a register,
739 get VALUE1 into a register. */
740 if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
741 (value1, maxmode)))
742 value1 = force_reg (maxmode, value1);
744 pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
745 if (pat)
746 emit_insn (pat);
747 else
749 delete_insns_since (last);
750 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
753 else
754 insv_loses:
755 /* Insv is not available; store using shifts and boolean ops. */
756 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
757 return value;
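/* A minimal usage sketch for store_bit_field, assuming an SImode pseudo and
   a constant value (a hypothetical caller; the field sizes and the fieldmode
   argument are illustrative only).  It stores the 5-bit value 0x15 at bit
   offset 3 of REG:

     rtx reg = gen_reg_rtx (SImode);
     emit_move_insn (reg, const0_rtx);
     store_bit_field (reg, 5, 3, QImode, GEN_INT (0x15));

   On a target with an insv pattern this can typically be done with a single
   bit-insert insn; otherwise it falls back to store_fixed_bit_field, which
   masks out the selected bits of REG and then IORs in the shifted value.  */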
760 /* Use shifts and boolean operations to store VALUE
761 into a bit field of width BITSIZE
762 in a memory location specified by OP0 except offset by OFFSET bytes.
763 (OFFSET must be 0 if OP0 is a register.)
764 The field starts at position BITPOS within the byte.
765 (If OP0 is a register, it may be a full word or a narrower mode,
766 but BITPOS still counts within a full word,
767 which is significant on bigendian machines.) */
769 static void
770 store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
771 unsigned HOST_WIDE_INT bitsize,
772 unsigned HOST_WIDE_INT bitpos, rtx value)
774 enum machine_mode mode;
775 unsigned int total_bits = BITS_PER_WORD;
776 rtx subtarget, temp;
777 int all_zero = 0;
778 int all_one = 0;
780 /* There is a case not handled here:
781 a structure with a known alignment of just a halfword
782 and a field split across two aligned halfwords within the structure.
783 Or likewise a structure with a known alignment of just a byte
784 and a field split across two bytes.
785 Such cases are not supposed to be able to occur. */
787 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
789 gcc_assert (!offset);
790 /* Special treatment for a bit field split across two registers. */
791 if (bitsize + bitpos > BITS_PER_WORD)
793 store_split_bit_field (op0, bitsize, bitpos, value);
794 return;
797 else
799 /* Get the proper mode to use for this field. We want a mode that
800 includes the entire field. If such a mode would be larger than
801 a word, we won't be doing the extraction the normal way.
802 We don't want a mode bigger than the destination. */
804 mode = GET_MODE (op0);
805 if (GET_MODE_BITSIZE (mode) == 0
806 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
807 mode = word_mode;
808 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
809 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
811 if (mode == VOIDmode)
813 /* The only way this should occur is if the field spans word
814 boundaries. */
815 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
816 value);
817 return;
820 total_bits = GET_MODE_BITSIZE (mode);
822 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
823 be in the range 0 to total_bits-1, and put any excess bytes in
824 OFFSET. */
825 if (bitpos >= total_bits)
827 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
828 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
829 * BITS_PER_UNIT);
832 /* Get ref to an aligned byte, halfword, or word containing the field.
833 Adjust BITPOS to be position within a word,
834 and OFFSET to be the offset of that word.
835 Then alter OP0 to refer to that word. */
836 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
837 offset -= (offset % (total_bits / BITS_PER_UNIT));
838 op0 = adjust_address (op0, mode, offset);
841 mode = GET_MODE (op0);
843 /* Now MODE is either some integral mode for a MEM as OP0,
844 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
845 The bit field is contained entirely within OP0.
846 BITPOS is the starting bit number within OP0.
847 (OP0's mode may actually be narrower than MODE.) */
849 if (BYTES_BIG_ENDIAN)
850 /* BITPOS is the distance between our msb
851 and that of the containing datum.
852 Convert it to the distance from the lsb. */
853 bitpos = total_bits - bitsize - bitpos;
855 /* Now BITPOS is always the distance between our lsb
856 and that of OP0. */
858 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
859 we must first convert its mode to MODE. */
861 if (GET_CODE (value) == CONST_INT)
863 HOST_WIDE_INT v = INTVAL (value);
865 if (bitsize < HOST_BITS_PER_WIDE_INT)
866 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
868 if (v == 0)
869 all_zero = 1;
870 else if ((bitsize < HOST_BITS_PER_WIDE_INT
871 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
872 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
873 all_one = 1;
875 value = lshift_value (mode, value, bitpos, bitsize);
877 else
879 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
880 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
882 if (GET_MODE (value) != mode)
884 if ((REG_P (value) || GET_CODE (value) == SUBREG)
885 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
886 value = gen_lowpart (mode, value);
887 else
888 value = convert_to_mode (mode, value, 1);
891 if (must_and)
892 value = expand_binop (mode, and_optab, value,
893 mask_rtx (mode, 0, bitsize, 0),
894 NULL_RTX, 1, OPTAB_LIB_WIDEN);
895 if (bitpos > 0)
896 value = expand_shift (LSHIFT_EXPR, mode, value,
897 build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
900 /* Now clear the chosen bits in OP0,
901 except that if VALUE is -1 we need not bother. */
903 subtarget = op0;
905 if (! all_one)
907 temp = expand_binop (mode, and_optab, op0,
908 mask_rtx (mode, bitpos, bitsize, 1),
909 subtarget, 1, OPTAB_LIB_WIDEN);
910 subtarget = temp;
912 else
913 temp = op0;
915 /* Now logical-or VALUE into OP0, unless it is zero. */
917 if (! all_zero)
918 temp = expand_binop (mode, ior_optab, temp, value,
919 subtarget, 1, OPTAB_LIB_WIDEN);
920 if (op0 != temp)
921 emit_move_insn (op0, temp);
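/* A worked example of the shift-and-mask sequence above for a non-constant
   VALUE, assuming a little-endian SImode destination, bitsize 5 and bitpos 3
   (numbers are illustrative only):

     mask_rtx (SImode, 3, 5, 1)  =>  ~0x000000f8  =>  0xffffff07

     op0   = op0 & 0xffffff07         clear bits 3..7
     value = (value & 0x1f) << 3      field value shifted into place
     op0   = op0 | value              install the new field  */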
924 /* Store a bit field that is split across multiple accessible memory objects.
926 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
927 BITSIZE is the field width; BITPOS the position of its first bit
928 (within the word).
929 VALUE is the value to store.
931 This does not yet handle fields wider than BITS_PER_WORD. */
933 static void
934 store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
935 unsigned HOST_WIDE_INT bitpos, rtx value)
937 unsigned int unit;
938 unsigned int bitsdone = 0;
940 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
941 much at a time. */
942 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
943 unit = BITS_PER_WORD;
944 else
945 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
947 /* If VALUE is a constant other than a CONST_INT, get it into a register in
948 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
949 that VALUE might be a floating-point constant. */
950 if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
952 rtx word = gen_lowpart_common (word_mode, value);
954 if (word && (value != word))
955 value = word;
956 else
957 value = gen_lowpart_common (word_mode,
958 force_reg (GET_MODE (value) != VOIDmode
959 ? GET_MODE (value)
960 : word_mode, value));
963 while (bitsdone < bitsize)
965 unsigned HOST_WIDE_INT thissize;
966 rtx part, word;
967 unsigned HOST_WIDE_INT thispos;
968 unsigned HOST_WIDE_INT offset;
970 offset = (bitpos + bitsdone) / unit;
971 thispos = (bitpos + bitsdone) % unit;
973 /* THISSIZE must not overrun a word boundary. Otherwise,
974 store_fixed_bit_field will call us again, and we will mutually
975 recurse forever. */
976 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
977 thissize = MIN (thissize, unit - thispos);
979 if (BYTES_BIG_ENDIAN)
981 int total_bits;
983 /* We must do an endian conversion exactly the same way as it is
984 done in extract_bit_field, so that the two calls to
985 extract_fixed_bit_field will have comparable arguments. */
986 if (!MEM_P (value) || GET_MODE (value) == BLKmode)
987 total_bits = BITS_PER_WORD;
988 else
989 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
991 /* Fetch successively less significant portions. */
992 if (GET_CODE (value) == CONST_INT)
993 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
994 >> (bitsize - bitsdone - thissize))
995 & (((HOST_WIDE_INT) 1 << thissize) - 1));
996 else
997 /* The args are chosen so that the last part includes the
998 lsb. Give extract_bit_field the value it needs (with
999 endianness compensation) to fetch the piece we want. */
1000 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1001 total_bits - bitsize + bitsdone,
1002 NULL_RTX, 1);
1004 else
1006 /* Fetch successively more significant portions. */
1007 if (GET_CODE (value) == CONST_INT)
1008 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1009 >> bitsdone)
1010 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1011 else
1012 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1013 bitsdone, NULL_RTX, 1);
1016 /* If OP0 is a register, then handle OFFSET here.
1018 When handling multiword bitfields, extract_bit_field may pass
1019 down a word_mode SUBREG of a larger REG for a bitfield that actually
1020 crosses a word boundary. Thus, for a SUBREG, we must find
1021 the current word starting from the base register. */
1022 if (GET_CODE (op0) == SUBREG)
1024 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1025 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1026 GET_MODE (SUBREG_REG (op0)));
1027 offset = 0;
1029 else if (REG_P (op0))
1031 word = operand_subword_force (op0, offset, GET_MODE (op0));
1032 offset = 0;
1034 else
1035 word = op0;
1037 /* OFFSET is in UNITs, and UNIT is in bits.
1038 store_fixed_bit_field wants offset in bytes. */
1039 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
1040 thispos, part);
1041 bitsdone += thissize;
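/* A worked example of the splitting loop above, assuming BITS_PER_WORD == 32,
   a register destination (so UNIT == 32), bitpos 28 and bitsize 8 (numbers
   are illustrative only):

     first pass:   bitsdone = 0, offset = 0, thispos = 28,
                   thissize = MIN (8, 32 - 28) = 4
     second pass:  bitsdone = 4, offset = 1, thispos = 0, thissize = 4

   so the field is stored as a 4-bit piece at bit 28 of word 0 and a 4-bit
   piece at bit 0 of word 1, each via store_fixed_bit_field.  */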
1045 /* Generate code to extract a bit-field from STR_RTX
1046 containing BITSIZE bits, starting at BITNUM,
1047 and put it in TARGET if possible (if TARGET is nonzero).
1048 Regardless of TARGET, we return the rtx for where the value is placed.
1050 STR_RTX is the structure containing the byte (a REG or MEM).
1051 UNSIGNEDP is nonzero if this is an unsigned bit field.
1052 MODE is the natural mode of the field value once extracted.
1053 TMODE is the mode the caller would like the value to have;
1054 but the value may be returned with type MODE instead.
1056 TOTAL_SIZE is the size in bytes of the containing structure,
1057 or -1 if varying.
1059 If a TARGET is specified and we can store in it at no extra cost,
1060 we do so, and return TARGET.
1061 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1062 if they are equally easy. */
1065 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1066 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1067 enum machine_mode mode, enum machine_mode tmode)
1069 unsigned int unit
1070 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
1071 unsigned HOST_WIDE_INT offset, bitpos;
1072 rtx op0 = str_rtx;
1073 rtx spec_target = target;
1074 rtx spec_target_subreg = 0;
1075 enum machine_mode int_mode;
1076 enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
1077 enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
1078 enum machine_mode mode1;
1079 int byte_offset;
1081 if (tmode == VOIDmode)
1082 tmode = mode;
1084 while (GET_CODE (op0) == SUBREG)
1086 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1087 op0 = SUBREG_REG (op0);
1090 /* If we have an out-of-bounds access to a register, just return an
1091 uninitialized register of the required mode. This can occur if the
1092 source code contains an out-of-bounds access to a small array. */
1093 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
1094 return gen_reg_rtx (tmode);
1096 if (REG_P (op0)
1097 && mode == GET_MODE (op0)
1098 && bitnum == 0
1099 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1101 /* We're trying to extract a full register from itself. */
1102 return op0;
1105 /* Use vec_extract patterns for extracting parts of vectors whenever
1106 available. */
1107 if (VECTOR_MODE_P (GET_MODE (op0))
1108 && !MEM_P (op0)
1109 && (vec_extract_optab->handlers[GET_MODE (op0)].insn_code
1110 != CODE_FOR_nothing)
1111 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
1112 == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
1114 enum machine_mode outermode = GET_MODE (op0);
1115 enum machine_mode innermode = GET_MODE_INNER (outermode);
1116 int icode = (int) vec_extract_optab->handlers[outermode].insn_code;
1117 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1118 rtx rtxpos = GEN_INT (pos);
1119 rtx src = op0;
1120 rtx dest = NULL, pat, seq;
1121 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
1122 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
1123 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
1125 if (innermode == tmode || innermode == mode)
1126 dest = target;
1128 if (!dest)
1129 dest = gen_reg_rtx (innermode);
1131 start_sequence ();
1133 if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
1134 dest = copy_to_mode_reg (mode0, dest);
1136 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
1137 src = copy_to_mode_reg (mode1, src);
1139 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
1140 rtxpos = copy_to_mode_reg (mode2, rtxpos);
1142 /* We could handle this, but we should always be called with a pseudo
1143 for our targets and all insns should take them as outputs. */
1144 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
1145 && (*insn_data[icode].operand[1].predicate) (src, mode1)
1146 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
1148 pat = GEN_FCN (icode) (dest, src, rtxpos);
1149 seq = get_insns ();
1150 end_sequence ();
1151 if (pat)
1153 emit_insn (seq);
1154 emit_insn (pat);
1155 return dest;
1159 /* Make sure we are playing with integral modes. Pun with subregs
1160 if we aren't. */
1162 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
1163 if (imode != GET_MODE (op0))
1165 if (MEM_P (op0))
1166 op0 = adjust_address (op0, imode, 0);
1167 else
1169 gcc_assert (imode != BLKmode);
1170 op0 = gen_lowpart (imode, op0);
1172 /* If we got a SUBREG, force it into a register since we
1173 aren't going to be able to do another SUBREG on it. */
1174 if (GET_CODE (op0) == SUBREG)
1175 op0 = force_reg (imode, op0);
1180 /* We may be accessing data outside the field, which means
1181 we can alias adjacent data. */
1182 if (MEM_P (op0))
1184 op0 = shallow_copy_rtx (op0);
1185 set_mem_alias_set (op0, 0);
1186 set_mem_expr (op0, 0);
1189 /* Extraction of a full-word or multi-word value from a structure
1190 in a register or aligned memory can be done with just a SUBREG.
1191 A subword value in the least significant part of a register
1192 can also be extracted with a SUBREG. For this, we need the
1193 byte offset of the value in op0. */
1195 bitpos = bitnum % unit;
1196 offset = bitnum / unit;
1197 byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;
1199 /* If OP0 is a register, BITPOS must count within a word.
1200 But as we have it, it counts within whatever size OP0 now has.
1201 On a bigendian machine, these are not the same, so convert. */
1202 if (BYTES_BIG_ENDIAN
1203 && !MEM_P (op0)
1204 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
1205 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1207 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1208 If that's wrong, the solution is to test for it and set TARGET to 0
1209 if needed. */
1211 /* Only scalar integer modes can be converted via subregs. There is an
1212 additional problem for FP modes here in that they can have a precision
1213 which is different from the size. mode_for_size uses precision, but
1214 we want a mode based on the size, so we must avoid calling it for FP
1215 modes. */
1216 mode1 = (SCALAR_INT_MODE_P (tmode)
1217 ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
1218 : mode);
1220 if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
1221 && bitpos % BITS_PER_WORD == 0)
1222 || (mode1 != BLKmode
1223 /* ??? The big endian test here is wrong. This is correct
1224 if the value is in a register, and if mode_for_size is not
1225 the same mode as op0. This causes us to get unnecessarily
1226 inefficient code from the Thumb port when -mbig-endian. */
1227 && (BYTES_BIG_ENDIAN
1228 ? bitpos + bitsize == BITS_PER_WORD
1229 : bitpos == 0)))
1230 && ((!MEM_P (op0)
1231 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1232 GET_MODE_BITSIZE (GET_MODE (op0)))
1233 && GET_MODE_SIZE (mode1) != 0
1234 && byte_offset % GET_MODE_SIZE (mode1) == 0)
1235 || (MEM_P (op0)
1236 && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
1237 || (offset * BITS_PER_UNIT % bitsize == 0
1238 && MEM_ALIGN (op0) % bitsize == 0)))))
1240 if (mode1 != GET_MODE (op0))
1242 if (MEM_P (op0))
1243 op0 = adjust_address (op0, mode1, offset);
1244 else
1246 rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
1247 byte_offset);
1248 if (sub == NULL)
1249 goto no_subreg_mode_swap;
1250 op0 = sub;
1253 if (mode1 != mode)
1254 return convert_to_mode (tmode, op0, unsignedp);
1255 return op0;
1257 no_subreg_mode_swap:
1259 /* Handle fields bigger than a word. */
1261 if (bitsize > BITS_PER_WORD)
1263 /* Here we transfer the words of the field
1264 in the order least significant first.
1265 This is because the most significant word is the one which may
1266 be less than full. */
1268 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1269 unsigned int i;
1271 if (target == 0 || !REG_P (target))
1272 target = gen_reg_rtx (mode);
1274 /* Indicate for flow that the entire target reg is being set. */
1275 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
1277 for (i = 0; i < nwords; i++)
1279 /* If I is 0, use the low-order word in both field and target;
1280 if I is 1, use the next to lowest word; and so on. */
1281 /* Word number in TARGET to use. */
1282 unsigned int wordnum
1283 = (WORDS_BIG_ENDIAN
1284 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1285 : i);
1286 /* Offset from start of field in OP0. */
1287 unsigned int bit_offset = (WORDS_BIG_ENDIAN
1288 ? MAX (0, ((int) bitsize - ((int) i + 1)
1289 * (int) BITS_PER_WORD))
1290 : (int) i * BITS_PER_WORD);
1291 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1292 rtx result_part
1293 = extract_bit_field (op0, MIN (BITS_PER_WORD,
1294 bitsize - i * BITS_PER_WORD),
1295 bitnum + bit_offset, 1, target_part, mode,
1296 word_mode);
1298 gcc_assert (target_part);
1300 if (result_part != target_part)
1301 emit_move_insn (target_part, result_part);
1304 if (unsignedp)
1306 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1307 need to be zero'd out. */
1308 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1310 unsigned int i, total_words;
1312 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1313 for (i = nwords; i < total_words; i++)
1314 emit_move_insn
1315 (operand_subword (target,
1316 WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
1317 1, VOIDmode),
1318 const0_rtx);
1320 return target;
1323 /* Signed bit field: sign-extend with two arithmetic shifts. */
1324 target = expand_shift (LSHIFT_EXPR, mode, target,
1325 build_int_cst (NULL_TREE,
1326 GET_MODE_BITSIZE (mode) - bitsize),
1327 NULL_RTX, 0);
1328 return expand_shift (RSHIFT_EXPR, mode, target,
1329 build_int_cst (NULL_TREE,
1330 GET_MODE_BITSIZE (mode) - bitsize),
1331 NULL_RTX, 0);
1334 /* From here on we know the desired field is smaller than a word. */
1336 /* Check if there is a correspondingly-sized integer field, so we can
1337 safely extract it as one size of integer, if necessary; then
1338 truncate or extend to the size that is wanted; then use SUBREGs or
1339 convert_to_mode to get one of the modes we really wanted. */
1341 int_mode = int_mode_for_mode (tmode);
1342 if (int_mode == BLKmode)
1343 int_mode = int_mode_for_mode (mode);
1344 /* Should probably push op0 out to memory and then do a load. */
1345 gcc_assert (int_mode != BLKmode);
1347 /* OFFSET is the number of words or bytes (UNIT says which)
1348 from STR_RTX to the first word or byte containing part of the field. */
1349 if (!MEM_P (op0))
1351 if (offset != 0
1352 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1354 if (!REG_P (op0))
1355 op0 = copy_to_reg (op0);
1356 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
1357 op0, (offset * UNITS_PER_WORD));
1359 offset = 0;
1362 /* Now OFFSET is nonzero only for memory operands. */
1364 if (unsignedp)
1366 if (HAVE_extzv
1367 && bitsize > 0
1368 && GET_MODE_BITSIZE (extzv_mode) >= bitsize
1369 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
1370 && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
1372 unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
1373 rtx bitsize_rtx, bitpos_rtx;
1374 rtx last = get_last_insn ();
1375 rtx xop0 = op0;
1376 rtx xtarget = target;
1377 rtx xspec_target = spec_target;
1378 rtx xspec_target_subreg = spec_target_subreg;
1379 rtx pat;
1380 enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);
1382 if (MEM_P (xop0))
1384 int save_volatile_ok = volatile_ok;
1385 volatile_ok = 1;
1387 /* Is the memory operand acceptable? */
1388 if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
1389 (xop0, GET_MODE (xop0))))
1391 /* No, load into a reg and extract from there. */
1392 enum machine_mode bestmode;
1394 /* Get the mode to use for extracting this field. If
1395 OP0 is BLKmode, get the smallest mode consistent with the
1396 alignment. If OP0 is a non-BLKmode object that is no
1397 wider than MAXMODE, use its mode. Otherwise, use the
1398 smallest mode containing the field. */
1400 if (GET_MODE (xop0) == BLKmode
1401 || (GET_MODE_SIZE (GET_MODE (op0))
1402 > GET_MODE_SIZE (maxmode)))
1403 bestmode = get_best_mode (bitsize, bitnum,
1404 MEM_ALIGN (xop0), maxmode,
1405 MEM_VOLATILE_P (xop0));
1406 else
1407 bestmode = GET_MODE (xop0);
1409 if (bestmode == VOIDmode
1410 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
1411 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
1412 goto extzv_loses;
1414 /* Compute offset as multiple of this unit,
1415 counting in bytes. */
1416 unit = GET_MODE_BITSIZE (bestmode);
1417 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1418 xbitpos = bitnum % unit;
1419 xop0 = adjust_address (xop0, bestmode, xoffset);
1421 /* Make sure register is big enough for the whole field. */
1422 if (xoffset * BITS_PER_UNIT + unit
1423 < offset * BITS_PER_UNIT + bitsize)
1424 goto extzv_loses;
1426 /* Fetch it to a register in that size. */
1427 xop0 = force_reg (bestmode, xop0);
1429 /* XBITPOS counts within UNIT, which is what is expected. */
1431 else
1432 /* Get ref to first byte containing part of the field. */
1433 xop0 = adjust_address (xop0, byte_mode, xoffset);
1435 volatile_ok = save_volatile_ok;
1438 /* If op0 is a register, we need it in MAXMODE (which is usually
1439 SImode) to make it acceptable to the format of extzv. */
1440 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1441 goto extzv_loses;
1442 if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
1443 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
1445 /* On big-endian machines, we count bits from the most significant.
1446 If the bit field insn does not, we must invert. */
1447 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1448 xbitpos = unit - bitsize - xbitpos;
1450 /* Now convert from counting within UNIT to counting in MAXMODE. */
1451 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
1452 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
1454 unit = GET_MODE_BITSIZE (maxmode);
1456 if (xtarget == 0)
1457 xtarget = xspec_target = gen_reg_rtx (tmode);
1459 if (GET_MODE (xtarget) != maxmode)
1461 if (REG_P (xtarget))
1463 int wider = (GET_MODE_SIZE (maxmode)
1464 > GET_MODE_SIZE (GET_MODE (xtarget)));
1465 xtarget = gen_lowpart (maxmode, xtarget);
1466 if (wider)
1467 xspec_target_subreg = xtarget;
1469 else
1470 xtarget = gen_reg_rtx (maxmode);
1473 /* If this machine's extzv insists on a register target,
1474 make sure we have one. */
1475 if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
1476 (xtarget, maxmode)))
1477 xtarget = gen_reg_rtx (maxmode);
1479 bitsize_rtx = GEN_INT (bitsize);
1480 bitpos_rtx = GEN_INT (xbitpos);
1482 pat = gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
1483 if (pat)
1485 emit_insn (pat);
1486 target = xtarget;
1487 spec_target = xspec_target;
1488 spec_target_subreg = xspec_target_subreg;
1490 else
1492 delete_insns_since (last);
1493 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1494 bitpos, target, 1);
1497 else
1498 extzv_loses:
1499 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1500 bitpos, target, 1);
1502 else
1504 if (HAVE_extv
1505 && bitsize > 0
1506 && GET_MODE_BITSIZE (extv_mode) >= bitsize
1507 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
1508 && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
1510 int xbitpos = bitpos, xoffset = offset;
1511 rtx bitsize_rtx, bitpos_rtx;
1512 rtx last = get_last_insn ();
1513 rtx xop0 = op0, xtarget = target;
1514 rtx xspec_target = spec_target;
1515 rtx xspec_target_subreg = spec_target_subreg;
1516 rtx pat;
1517 enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);
1519 if (MEM_P (xop0))
1521 /* Is the memory operand acceptable? */
1522 if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
1523 (xop0, GET_MODE (xop0))))
1525 /* No, load into a reg and extract from there. */
1526 enum machine_mode bestmode;
1528 /* Get the mode to use for extracting this field. If
1529 OP0 is BLKmode, get the smallest mode consistent with the
1530 alignment. If OP0 is a non-BLKmode object that is no
1531 wider than MAXMODE, use its mode. Otherwise, use the
1532 smallest mode containing the field. */
1534 if (GET_MODE (xop0) == BLKmode
1535 || (GET_MODE_SIZE (GET_MODE (op0))
1536 > GET_MODE_SIZE (maxmode)))
1537 bestmode = get_best_mode (bitsize, bitnum,
1538 MEM_ALIGN (xop0), maxmode,
1539 MEM_VOLATILE_P (xop0));
1540 else
1541 bestmode = GET_MODE (xop0);
1543 if (bestmode == VOIDmode
1544 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
1545 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
1546 goto extv_loses;
1548 /* Compute offset as multiple of this unit,
1549 counting in bytes. */
1550 unit = GET_MODE_BITSIZE (bestmode);
1551 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1552 xbitpos = bitnum % unit;
1553 xop0 = adjust_address (xop0, bestmode, xoffset);
1555 /* Make sure register is big enough for the whole field. */
1556 if (xoffset * BITS_PER_UNIT + unit
1557 < offset * BITS_PER_UNIT + bitsize)
1558 goto extv_loses;
1560 /* Fetch it to a register in that size. */
1561 xop0 = force_reg (bestmode, xop0);
1563 /* XBITPOS counts within UNIT, which is what is expected. */
1565 else
1566 /* Get ref to first byte containing part of the field. */
1567 xop0 = adjust_address (xop0, byte_mode, xoffset);
1570 /* If op0 is a register, we need it in MAXMODE (which is usually
1571 SImode) to make it acceptable to the format of extv. */
1572 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1573 goto extv_loses;
1574 if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
1575 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
1577 /* On big-endian machines, we count bits from the most significant.
1578 If the bit field insn does not, we must invert. */
1579 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1580 xbitpos = unit - bitsize - xbitpos;
1582 /* XBITPOS counts within a size of UNIT.
1583 Adjust to count within a size of MAXMODE. */
1584 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
1585 xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);
1587 unit = GET_MODE_BITSIZE (maxmode);
1589 if (xtarget == 0)
1590 xtarget = xspec_target = gen_reg_rtx (tmode);
1592 if (GET_MODE (xtarget) != maxmode)
1594 if (REG_P (xtarget))
1596 int wider = (GET_MODE_SIZE (maxmode)
1597 > GET_MODE_SIZE (GET_MODE (xtarget)));
1598 xtarget = gen_lowpart (maxmode, xtarget);
1599 if (wider)
1600 xspec_target_subreg = xtarget;
1602 else
1603 xtarget = gen_reg_rtx (maxmode);
1606 /* If this machine's extv insists on a register target,
1607 make sure we have one. */
1608 if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
1609 (xtarget, maxmode)))
1610 xtarget = gen_reg_rtx (maxmode);
1612 bitsize_rtx = GEN_INT (bitsize);
1613 bitpos_rtx = GEN_INT (xbitpos);
1615 pat = gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
1616 if (pat)
1618 emit_insn (pat);
1619 target = xtarget;
1620 spec_target = xspec_target;
1621 spec_target_subreg = xspec_target_subreg;
1623 else
1625 delete_insns_since (last);
1626 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1627 bitpos, target, 0);
1630 else
1631 extv_loses:
1632 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1633 bitpos, target, 0);
1635 if (target == spec_target)
1636 return target;
1637 if (target == spec_target_subreg)
1638 return spec_target;
1639 if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
1641 /* If the target mode is not a scalar integral, first convert to the
1642 integer mode of that size and then access it as a floating-point
1643 value via a SUBREG. */
1644 if (!SCALAR_INT_MODE_P (tmode))
1646 enum machine_mode smode
1647 = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
1648 target = convert_to_mode (smode, target, unsignedp);
1649 target = force_reg (smode, target);
1650 return gen_lowpart (tmode, target);
1653 return convert_to_mode (tmode, target, unsignedp);
1655 return target;
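/* A minimal usage sketch for extract_bit_field, assuming an SImode pseudo as
   the source (a hypothetical caller; the field sizes are illustrative only).
   It reads the unsigned 5-bit field at bit offset 3 of REG:

     rtx reg   = gen_reg_rtx (SImode);
     rtx field = extract_bit_field (reg, 5, 3, 1, NULL_RTX,
                                    SImode, SImode);

   With TARGET passed as NULL_RTX a fresh pseudo holds the result; on targets
   with an extzv pattern a single extract insn is emitted, otherwise the value
   is produced by a right shift followed by an AND with 0x1f.  */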
1658 /* Extract a bit field using shifts and boolean operations.
1659 Returns an rtx to represent the value.
1660 OP0 addresses a register (word) or memory (byte).
1661 BITPOS says which bit within the word or byte the bit field starts in.
1662 OFFSET says how many bytes farther the bit field starts;
1663 it is 0 if OP0 is a register.
1664 BITSIZE says how many bits long the bit field is.
1665 (If OP0 is a register, it may be narrower than a full word,
1666 but BITPOS still counts within a full word,
1667 which is significant on bigendian machines.)
1669 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1670 If TARGET is nonzero, attempts to store the value there
1671 and return TARGET, but this is not guaranteed.
1672 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1674 static rtx
1675 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1676 unsigned HOST_WIDE_INT offset,
1677 unsigned HOST_WIDE_INT bitsize,
1678 unsigned HOST_WIDE_INT bitpos, rtx target,
1679 int unsignedp)
1681 unsigned int total_bits = BITS_PER_WORD;
1682 enum machine_mode mode;
1684 if (GET_CODE (op0) == SUBREG || REG_P (op0))
1686 /* Special treatment for a bit field split across two registers. */
1687 if (bitsize + bitpos > BITS_PER_WORD)
1688 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1690 else
1692 /* Get the proper mode to use for this field. We want a mode that
1693 includes the entire field. If such a mode would be larger than
1694 a word, we won't be doing the extraction the normal way. */
1696 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1697 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1699 if (mode == VOIDmode)
1700 /* The only way this should occur is if the field spans word
1701 boundaries. */
1702 return extract_split_bit_field (op0, bitsize,
1703 bitpos + offset * BITS_PER_UNIT,
1704 unsignedp);
1706 total_bits = GET_MODE_BITSIZE (mode);
1708 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1709 be in the range 0 to total_bits-1, and put any excess bytes in
1710 OFFSET. */
1711 if (bitpos >= total_bits)
1713 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1714 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1715 * BITS_PER_UNIT);
1718 /* Get ref to an aligned byte, halfword, or word containing the field.
1719 Adjust BITPOS to be position within a word,
1720 and OFFSET to be the offset of that word.
1721 Then alter OP0 to refer to that word. */
1722 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1723 offset -= (offset % (total_bits / BITS_PER_UNIT));
1724 op0 = adjust_address (op0, mode, offset);
1727 mode = GET_MODE (op0);
1729 if (BYTES_BIG_ENDIAN)
1730 /* BITPOS is the distance between our msb and that of OP0.
1731 Convert it to the distance from the lsb. */
1732 bitpos = total_bits - bitsize - bitpos;
1734 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1735 We have reduced the big-endian case to the little-endian case. */
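/* For illustration (one possible sequence, not the only one): extracting an
   8-bit field at BITPOS 4 from an SImode value X is done below as
   (X >> 4) & 0xff in the unsigned case; in the signed case the value is
   first narrowed to the smallest integer mode that holds the field (HImode
   here), shifted left by 4 so the field's msb becomes the mode's msb, and
   then arithmetic-shifted right by 8 to sign-extend it.  */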
1737 if (unsignedp)
1739 if (bitpos)
1741 /* If the field does not already start at the lsb,
1742 shift it so it does. */
1743 tree amount = build_int_cst (NULL_TREE, bitpos);
1744 /* Maybe propagate the target for the shift. */
1745 /* But not if we will return it--could confuse integrate.c. */
1746 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1747 if (tmode != mode) subtarget = 0;
1748 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1750 /* Convert the value to the desired mode. */
1751 if (mode != tmode)
1752 op0 = convert_to_mode (tmode, op0, 1);
1754 /* Unless the msb of the field used to be the msb when we shifted,
1755 mask out the upper bits. */
1757 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1758 return expand_binop (GET_MODE (op0), and_optab, op0,
1759 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1760 target, 1, OPTAB_LIB_WIDEN);
1761 return op0;
1764 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1765 then arithmetic-shift its lsb to the lsb of the word. */
1766 op0 = force_reg (mode, op0);
1767 if (mode != tmode)
1768 target = 0;
1770 /* Find the narrowest integer mode that contains the field. */
1772 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1773 mode = GET_MODE_WIDER_MODE (mode))
1774 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1776 op0 = convert_to_mode (mode, op0, 0);
1777 break;
1780 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1782 tree amount
1783 = build_int_cst (NULL_TREE,
1784 GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
1785 /* Maybe propagate the target for the shift. */
1786 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1787 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1790 return expand_shift (RSHIFT_EXPR, mode, op0,
1791 build_int_cst (NULL_TREE,
1792 GET_MODE_BITSIZE (mode) - bitsize),
1793 target, 0);
1796 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1797 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1798 complement of that if COMPLEMENT. The mask is truncated if
1799 necessary to the width of mode MODE. The mask is zero-extended if
1800 BITSIZE+BITPOS is too small for MODE. */
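/* For example, mask_rtx (SImode, 4, 8, 0) yields 0x00000ff0 (eight ones
   starting at bit 4), and mask_rtx (SImode, 4, 8, 1) yields its complement
   0xfffff00f.  */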
1802 static rtx
1803 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1805 HOST_WIDE_INT masklow, maskhigh;
1807 if (bitsize == 0)
1808 masklow = 0;
1809 else if (bitpos < HOST_BITS_PER_WIDE_INT)
1810 masklow = (HOST_WIDE_INT) -1 << bitpos;
1811 else
1812 masklow = 0;
1814 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1815 masklow &= ((unsigned HOST_WIDE_INT) -1
1816 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1818 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1819 maskhigh = -1;
1820 else
1821 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1823 if (bitsize == 0)
1824 maskhigh = 0;
1825 else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1826 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1827 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1828 else
1829 maskhigh = 0;
1831 if (complement)
1833 maskhigh = ~maskhigh;
1834 masklow = ~masklow;
1837 return immed_double_const (masklow, maskhigh, mode);
1840 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1841 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1843 static rtx
1844 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1846 unsigned HOST_WIDE_INT v = INTVAL (value);
1847 HOST_WIDE_INT low, high;
1849 if (bitsize < HOST_BITS_PER_WIDE_INT)
1850 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1852 if (bitpos < HOST_BITS_PER_WIDE_INT)
1854 low = v << bitpos;
1855 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1857 else
1859 low = 0;
1860 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1863 return immed_double_const (low, high, mode);
1866 /* Extract a bit field from a memory by forcing the alignment of the
1867 memory. This is efficient only if the field spans at least 4 boundaries.
1869 OP0 is the MEM.
1870 BITSIZE is the field width; BITPOS is the position of the first bit.
1871 UNSIGNEDP is true if the result should be zero-extended. */
1873 static rtx
1874 extract_force_align_mem_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1875 unsigned HOST_WIDE_INT bitpos,
1876 int unsignedp)
1878 enum machine_mode mode, dmode;
1879 unsigned int m_bitsize, m_size;
1880 unsigned int sign_shift_up, sign_shift_dn;
1881 rtx base, a1, a2, v1, v2, comb, shift, result, start;
1883 /* Choose a mode that will fit BITSIZE. */
1884 mode = smallest_mode_for_size (bitsize, MODE_INT);
1885 m_size = GET_MODE_SIZE (mode);
1886 m_bitsize = GET_MODE_BITSIZE (mode);
1888 /* Choose a mode twice as wide. Fail if no such mode exists. */
1889 dmode = mode_for_size (m_bitsize * 2, MODE_INT, false);
1890 if (dmode == BLKmode)
1891 return NULL;
1893 do_pending_stack_adjust ();
1894 start = get_last_insn ();
1896 /* At the end, we'll need an additional shift to deal with sign/zero
1897 extension. By default this will be a left+right shift of the
1898 appropriate size. But we may be able to eliminate one of them. */
1899 sign_shift_up = sign_shift_dn = m_bitsize - bitsize;
1901 if (STRICT_ALIGNMENT)
1903 base = plus_constant (XEXP (op0, 0), bitpos / BITS_PER_UNIT);
1904 bitpos %= BITS_PER_UNIT;
1906 /* We load two values to be concatenated. There's an edge condition
1907 that bears notice -- an aligned value at the end of a page can
1908 only load one value lest we segfault. So the two values we load
1909 are at "base & -size" and "(base + size - 1) & -size". If base
1910 is unaligned, the addresses will be aligned and sequential; if
1911 base is aligned, the addresses will both be equal to base. */
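/* E.g. with m_size == 4: for base == 0x1003 the two loads come from
   0x1000 and 0x1004 (aligned and sequential); for base == 0x1000 both
   loads come from 0x1000 itself.  */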
1913 a1 = expand_simple_binop (Pmode, AND, force_operand (base, NULL),
1914 GEN_INT (-(HOST_WIDE_INT)m_size),
1915 NULL, true, OPTAB_LIB_WIDEN);
1916 mark_reg_pointer (a1, m_bitsize);
1917 v1 = gen_rtx_MEM (mode, a1);
1918 set_mem_align (v1, m_bitsize);
1919 v1 = force_reg (mode, validize_mem (v1));
1921 a2 = plus_constant (base, GET_MODE_SIZE (mode) - 1);
1922 a2 = expand_simple_binop (Pmode, AND, force_operand (a2, NULL),
1923 GEN_INT (-(HOST_WIDE_INT)m_size),
1924 NULL, true, OPTAB_LIB_WIDEN);
1925 v2 = gen_rtx_MEM (mode, a2);
1926 set_mem_align (v2, m_bitsize);
1927 v2 = force_reg (mode, validize_mem (v2));
1929 /* Combine these two values into a double-word value. */
1930 if (m_bitsize == BITS_PER_WORD)
1932 comb = gen_reg_rtx (dmode);
1933 emit_insn (gen_rtx_CLOBBER (VOIDmode, comb));
1934 emit_move_insn (gen_rtx_SUBREG (mode, comb, 0), v1);
1935 emit_move_insn (gen_rtx_SUBREG (mode, comb, m_size), v2);
1937 else
1939 if (BYTES_BIG_ENDIAN)
1940 comb = v1, v1 = v2, v2 = comb;
1941 v1 = convert_modes (dmode, mode, v1, true);
1942 if (v1 == NULL)
1943 goto fail;
1944 v2 = convert_modes (dmode, mode, v2, true);
1945 v2 = expand_simple_binop (dmode, ASHIFT, v2, GEN_INT (m_bitsize),
1946 NULL, true, OPTAB_LIB_WIDEN);
1947 if (v2 == NULL)
1948 goto fail;
1949 comb = expand_simple_binop (dmode, IOR, v1, v2, NULL,
1950 true, OPTAB_LIB_WIDEN);
1951 if (comb == NULL)
1952 goto fail;
1955 shift = expand_simple_binop (Pmode, AND, base, GEN_INT (m_size - 1),
1956 NULL, true, OPTAB_LIB_WIDEN);
1957 shift = expand_mult (Pmode, shift, GEN_INT (BITS_PER_UNIT), NULL, 1);
1959 if (bitpos != 0)
1961 if (sign_shift_up <= bitpos)
1962 bitpos -= sign_shift_up, sign_shift_up = 0;
1963 shift = expand_simple_binop (Pmode, PLUS, shift, GEN_INT (bitpos),
1964 NULL, true, OPTAB_LIB_WIDEN);
1967 else
1969 unsigned HOST_WIDE_INT offset = bitpos / BITS_PER_UNIT;
1970 bitpos %= BITS_PER_UNIT;
1972 /* When strict alignment is not required, we can just load directly
1973 from memory without masking. If the remaining BITPOS offset is
1974 small enough, we may be able to do all operations in MODE as
1975 opposed to DMODE. */
1976 if (bitpos + bitsize <= m_bitsize)
1977 dmode = mode;
1978 comb = adjust_address (op0, dmode, offset);
1980 if (sign_shift_up <= bitpos)
1981 bitpos -= sign_shift_up, sign_shift_up = 0;
1982 shift = GEN_INT (bitpos);
1985 /* Shift down the double-word such that the requested value is at bit 0. */
1986 if (shift != const0_rtx)
1987 comb = expand_simple_binop (dmode, unsignedp ? LSHIFTRT : ASHIFTRT,
1988 comb, shift, NULL, unsignedp, OPTAB_LIB_WIDEN);
1989 if (comb == NULL)
1990 goto fail;
1992 /* If the field exactly matches MODE, then all we need to do is return the
1993 lowpart. Otherwise, shift to get the sign bits set properly. */
1994 result = force_reg (mode, gen_lowpart (mode, comb));
1996 if (sign_shift_up)
1997 result = expand_simple_binop (mode, ASHIFT, result,
1998 GEN_INT (sign_shift_up),
1999 NULL_RTX, 0, OPTAB_LIB_WIDEN);
2000 if (sign_shift_dn)
2001 result = expand_simple_binop (mode, unsignedp ? LSHIFTRT : ASHIFTRT,
2002 result, GEN_INT (sign_shift_dn),
2003 NULL_RTX, 0, OPTAB_LIB_WIDEN);
2005 return result;
2007 fail:
2008 delete_insns_since (start);
2009 return NULL;
2012 /* Extract a bit field that is split across two words
2013 and return an RTX for the result.
2015 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
2016 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
2017 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
2019 static rtx
2020 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
2021 unsigned HOST_WIDE_INT bitpos, int unsignedp)
2023 unsigned int unit;
2024 unsigned int bitsdone = 0;
2025 rtx result = NULL_RTX;
2026 int first = 1;
2028 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
2029 much at a time. */
2030 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
2031 unit = BITS_PER_WORD;
2032 else
2034 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
2035 if (0 && bitsize / unit > 2)
2037 rtx tmp = extract_force_align_mem_bit_field (op0, bitsize, bitpos,
2038 unsignedp);
2039 if (tmp)
2040 return tmp;
2044 while (bitsdone < bitsize)
2046 unsigned HOST_WIDE_INT thissize;
2047 rtx part, word;
2048 unsigned HOST_WIDE_INT thispos;
2049 unsigned HOST_WIDE_INT offset;
2051 offset = (bitpos + bitsdone) / unit;
2052 thispos = (bitpos + bitsdone) % unit;
2054 /* THISSIZE must not overrun a word boundary. Otherwise,
2055 extract_fixed_bit_field will call us again, and we will mutually
2056 recurse forever. */
2057 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
2058 thissize = MIN (thissize, unit - thispos);
2060 /* If OP0 is a register, then handle OFFSET here.
2062 When handling multiword bitfields, extract_bit_field may pass
2063 down a word_mode SUBREG of a larger REG for a bitfield that actually
2064 crosses a word boundary. Thus, for a SUBREG, we must find
2065 the current word starting from the base register. */
2066 if (GET_CODE (op0) == SUBREG)
2068 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
2069 word = operand_subword_force (SUBREG_REG (op0), word_offset,
2070 GET_MODE (SUBREG_REG (op0)));
2071 offset = 0;
2073 else if (REG_P (op0))
2075 word = operand_subword_force (op0, offset, GET_MODE (op0));
2076 offset = 0;
2078 else
2079 word = op0;
2081 /* Extract the parts in bit-counting order,
2082 whose meaning is determined by BYTES_PER_UNIT.
2083 OFFSET is in UNITs, and UNIT is in bits.
2084 extract_fixed_bit_field wants offset in bytes. */
2085 part = extract_fixed_bit_field (word_mode, word,
2086 offset * unit / BITS_PER_UNIT,
2087 thissize, thispos, 0, 1);
2088 bitsdone += thissize;
2090 /* Shift this part into place for the result. */
2091 if (BYTES_BIG_ENDIAN)
2093 if (bitsize != bitsdone)
2094 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2095 build_int_cst (NULL_TREE, bitsize - bitsdone),
2096 0, 1);
2098 else
2100 if (bitsdone != thissize)
2101 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2102 build_int_cst (NULL_TREE,
2103 bitsdone - thissize), 0, 1);
2106 if (first)
2107 result = part;
2108 else
2109 /* Combine the parts with bitwise or. This works
2110 because we extracted each part as an unsigned bit field. */
2111 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2112 OPTAB_LIB_WIDEN);
2114 first = 0;
2117 /* Unsigned bit field: we are done. */
2118 if (unsignedp)
2119 return result;
2120 /* Signed bit field: sign-extend with two arithmetic shifts. */
2121 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2122 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2123 NULL_RTX, 0);
2124 return expand_shift (RSHIFT_EXPR, word_mode, result,
2125 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2126 NULL_RTX, 0);
2129 /* Add INC into TARGET. */
2131 void
2132 expand_inc (rtx target, rtx inc)
2134 rtx value = expand_binop (GET_MODE (target), add_optab,
2135 target, inc,
2136 target, 0, OPTAB_LIB_WIDEN);
2137 if (value != target)
2138 emit_move_insn (target, value);
2141 /* Subtract DEC from TARGET. */
2143 void
2144 expand_dec (rtx target, rtx dec)
2146 rtx value = expand_binop (GET_MODE (target), sub_optab,
2147 target, dec,
2148 target, 0, OPTAB_LIB_WIDEN);
2149 if (value != target)
2150 emit_move_insn (target, value);
2153 /* Output a shift instruction for expression code CODE,
2154 with SHIFTED being the rtx for the value to shift,
2155 and AMOUNT the tree for the amount to shift by.
2156 Store the result in the rtx TARGET, if that is convenient.
2157 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2158 Return the rtx for where the value is. */
2161 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2162 tree amount, rtx target, int unsignedp)
2164 rtx op1, temp = 0;
2165 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2166 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2167 int try;
2169 /* Previously detected shift-counts computed by NEGATE_EXPR
2170 and shifted in the other direction; but that does not work
2171 on all machines. */
2173 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
2175 if (SHIFT_COUNT_TRUNCATED)
2177 if (GET_CODE (op1) == CONST_INT
2178 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2179 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2180 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2181 % GET_MODE_BITSIZE (mode));
2182 else if (GET_CODE (op1) == SUBREG
2183 && subreg_lowpart_p (op1))
2184 op1 = SUBREG_REG (op1);
2187 if (op1 == const0_rtx)
2188 return shifted;
2190 /* Check whether it's cheaper to implement a left shift by a constant
2191 bit count by a sequence of additions. */
2192 if (code == LSHIFT_EXPR
2193 && GET_CODE (op1) == CONST_INT
2194 && INTVAL (op1) > 0
2195 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2196 && shift_cost[mode][INTVAL (op1)] > INTVAL (op1) * add_cost[mode])
2198 int i;
2199 for (i = 0; i < INTVAL (op1); i++)
2201 temp = force_reg (mode, shifted);
2202 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2203 unsignedp, OPTAB_LIB_WIDEN);
2205 return shifted;
2208 for (try = 0; temp == 0 && try < 3; try++)
2210 enum optab_methods methods;
2212 if (try == 0)
2213 methods = OPTAB_DIRECT;
2214 else if (try == 1)
2215 methods = OPTAB_WIDEN;
2216 else
2217 methods = OPTAB_LIB_WIDEN;
2219 if (rotate)
2221 /* Widening does not work for rotation. */
2222 if (methods == OPTAB_WIDEN)
2223 continue;
2224 else if (methods == OPTAB_LIB_WIDEN)
2226 /* If we have been unable to open-code this by a rotation,
2227 do it as the IOR of two shifts. I.e., to rotate A
2228 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2229 where C is the bitsize of A.
2231 It is theoretically possible that the target machine might
2232 not be able to perform either shift and hence we would
2233 be making two libcalls rather than just the one for the
2234 shift (similarly if IOR could not be done). We will allow
2235 this extremely unlikely lossage to avoid complicating the
2236 code below. */
2238 rtx subtarget = target == shifted ? 0 : target;
2239 rtx temp1;
2240 tree type = TREE_TYPE (amount);
2241 tree new_amount = make_tree (type, op1);
2242 tree other_amount
2243 = fold_build2 (MINUS_EXPR, type,
2244 build_int_cst (type, GET_MODE_BITSIZE (mode)),
2245 amount);
2247 shifted = force_reg (mode, shifted);
2249 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2250 mode, shifted, new_amount, 0, 1);
2251 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2252 mode, shifted, other_amount, subtarget, 1);
2253 return expand_binop (mode, ior_optab, temp, temp1, target,
2254 unsignedp, methods);
2257 temp = expand_binop (mode,
2258 left ? rotl_optab : rotr_optab,
2259 shifted, op1, target, unsignedp, methods);
2261 else if (unsignedp)
2262 temp = expand_binop (mode,
2263 left ? ashl_optab : lshr_optab,
2264 shifted, op1, target, unsignedp, methods);
2266 /* Do arithmetic shifts.
2267 Also, if we are going to widen the operand, we can just as well
2268 use an arithmetic right-shift instead of a logical one. */
2269 if (temp == 0 && ! rotate
2270 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2272 enum optab_methods methods1 = methods;
2274 /* If trying to widen a log shift to an arithmetic shift,
2275 don't accept an arithmetic shift of the same size. */
2276 if (unsignedp)
2277 methods1 = OPTAB_MUST_WIDEN;
2279 /* Arithmetic shift */
2281 temp = expand_binop (mode,
2282 left ? ashl_optab : ashr_optab,
2283 shifted, op1, target, unsignedp, methods1);
2286 /* We used to try extzv here for logical right shifts, but that was
2287 only useful for one machine, the VAX, and caused poor code
2288 generation there for lshrdi3, so the code was deleted and a
2289 define_expand for lshrsi3 was added to vax.md. */
2292 gcc_assert (temp);
2293 return temp;
2296 enum alg_code {
2297 alg_unknown,
2298 alg_zero,
2299 alg_m, alg_shift,
2300 alg_add_t_m2,
2301 alg_sub_t_m2,
2302 alg_add_factor,
2303 alg_sub_factor,
2304 alg_add_t2_m,
2305 alg_sub_t2_m,
2306 alg_impossible
2309 /* This structure holds the "cost" of a multiply sequence. The
2310 "cost" field holds the total rtx_cost of every operator in the
2311 synthetic multiplication sequence, hence cost(a op b) is defined
2312 as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
2313 The "latency" field holds the minimum possible latency of the
2314 synthetic multiply, on a hypothetical infinitely parallel CPU.
2315 This is the critical path, or the maximum height, of the expression
2316 tree which is the sum of rtx_costs on the most expensive path from
2317 any leaf to the root. Hence latency(a op b) is defined as zero for
2318 leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise. */
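/* For instance, if every addition has rtx_cost 1, the expression
   (a + b) + (c + d) has cost 3 (three adds) but latency only 2, since the
   two inner additions can be evaluated in parallel.  */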
2320 struct mult_cost {
2321 short cost; /* Total rtx_cost of the multiplication sequence. */
2322 short latency; /* The latency of the multiplication sequence. */
2325 /* This macro is used to compare a pointer to a mult_cost against a
2326 single integer "rtx_cost" value. This is equivalent to the macro
2327 CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}. */
2328 #define MULT_COST_LESS(X,Y) ((X)->cost < (Y) \
2329 || ((X)->cost == (Y) && (X)->latency < (Y)))
2331 /* This macro is used to compare two pointers to mult_costs against
2332 each other. The macro returns true if X is cheaper than Y.
2333 Currently, the cheaper of two mult_costs is the one with the
2334 lower "cost". If "cost"s are tied, the lower latency is cheaper. */
2335 #define CHEAPER_MULT_COST(X,Y) ((X)->cost < (Y)->cost \
2336 || ((X)->cost == (Y)->cost \
2337 && (X)->latency < (Y)->latency))
2339 /* This structure records a sequence of operations.
2340 `ops' is the number of operations recorded.
2341 `cost' is their total cost.
2342 The operations are stored in `op' and the corresponding
2343 logarithms of the integer coefficients in `log'.
2345 These are the operations:
2346 alg_zero total := 0;
2347 alg_m total := multiplicand;
2348 alg_shift total := total * coeff
2349 alg_add_t_m2 total := total + multiplicand * coeff;
2350 alg_sub_t_m2 total := total - multiplicand * coeff;
2351 alg_add_factor total := total * coeff + total;
2352 alg_sub_factor total := total * coeff - total;
2353 alg_add_t2_m total := total * coeff + multiplicand;
2354 alg_sub_t2_m total := total * coeff - multiplicand;
2356 The first operand must be either alg_zero or alg_m. */
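/* As an illustration, one sequence synth_mult might record for a
   multiplication by 45 (= 5 * 9) is:
   op[0] = alg_m                  total := x
   op[1] = alg_add_factor, log 2  total := total*4 + total   (5*x)
   op[2] = alg_add_factor, log 3  total := total*8 + total   (45*x)  */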
2358 struct algorithm
2360 struct mult_cost cost;
2361 short ops;
2362 /* The sizes of the OP and LOG fields are not directly related to the
2363 word size, but the worst case arises when we have few consecutive
2364 ones or zeros, i.e., a multiplicand like 10101010101...
2365 In that case we will generate shift-by-2, add, shift-by-2, add, ...,
2366 wordsize operations in total. */
2367 enum alg_code op[MAX_BITS_PER_WORD];
2368 char log[MAX_BITS_PER_WORD];
2371 /* The entry for our multiplication cache/hash table. */
2372 struct alg_hash_entry {
2373 /* The number we are multiplying by. */
2374 unsigned int t;
2376 /* The mode in which we are multiplying something by T. */
2377 enum machine_mode mode;
2379 /* The best multiplication algorithm for t. */
2380 enum alg_code alg;
2382 /* The cost of multiplication if ALG_CODE is not alg_impossible.
2383 Otherwise, the cost within which multiplication by T is
2384 impossible. */
2385 struct mult_cost cost;
2388 /* The number of cache/hash entries. */
2389 #define NUM_ALG_HASH_ENTRIES 307
2391 /* Each entry of ALG_HASH caches alg_code for some integer. This is
2392 actually a hash table. If we have a collision, the older
2393 entry is kicked out. */
2394 static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];
2396 /* Indicates the type of fixup needed after a constant multiplication.
2397 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2398 the result should be negated, and ADD_VARIANT means that the
2399 multiplicand should be added to the result. */
2400 enum mult_variant {basic_variant, negate_variant, add_variant};
2402 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2403 const struct mult_cost *, enum machine_mode mode);
2404 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2405 struct algorithm *, enum mult_variant *, int);
2406 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2407 const struct algorithm *, enum mult_variant);
2408 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2409 int, rtx *, int *, int *);
2410 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2411 static rtx extract_high_half (enum machine_mode, rtx);
2412 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2413 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2414 int, int);
2415 /* Compute and return the best algorithm for multiplying by T.
2416 The algorithm must cost less than COST_LIMIT.
2417 If retval.cost >= COST_LIMIT, no algorithm was found and all
2418 other fields of the returned struct are undefined.
2419 MODE is the machine mode of the multiplication. */
2421 static void
2422 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2423 const struct mult_cost *cost_limit, enum machine_mode mode)
2425 int m;
2426 struct algorithm *alg_in, *best_alg;
2427 struct mult_cost best_cost;
2428 struct mult_cost new_limit;
2429 int op_cost, op_latency;
2430 unsigned HOST_WIDE_INT q;
2431 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2432 int hash_index;
2433 bool cache_hit = false;
2434 enum alg_code cache_alg = alg_zero;
2436 /* Indicate that no algorithm is yet found. If no algorithm
2437 is found, this value will be returned and indicate failure. */
2438 alg_out->cost.cost = cost_limit->cost + 1;
2439 alg_out->cost.latency = cost_limit->latency + 1;
2441 if (cost_limit->cost < 0
2442 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2443 return;
2445 /* Restrict the bits of "t" to the multiplication's mode. */
2446 t &= GET_MODE_MASK (mode);
2448 /* t == 1 can be done in zero cost. */
2449 if (t == 1)
2451 alg_out->ops = 1;
2452 alg_out->cost.cost = 0;
2453 alg_out->cost.latency = 0;
2454 alg_out->op[0] = alg_m;
2455 return;
2458 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2459 fail now. */
2460 if (t == 0)
2462 if (MULT_COST_LESS (cost_limit, zero_cost))
2463 return;
2464 else
2466 alg_out->ops = 1;
2467 alg_out->cost.cost = zero_cost;
2468 alg_out->cost.latency = zero_cost;
2469 alg_out->op[0] = alg_zero;
2470 return;
2474 /* We'll be needing a couple extra algorithm structures now. */
2476 alg_in = alloca (sizeof (struct algorithm));
2477 best_alg = alloca (sizeof (struct algorithm));
2478 best_cost = *cost_limit;
2480 /* Compute the hash index. */
2481 hash_index = (t ^ (unsigned int) mode) % NUM_ALG_HASH_ENTRIES;
2483 /* See if we already know what to do for T. */
2484 if (alg_hash[hash_index].t == t
2485 && alg_hash[hash_index].mode == mode
2486 && alg_hash[hash_index].alg != alg_unknown)
2488 cache_alg = alg_hash[hash_index].alg;
2490 if (cache_alg == alg_impossible)
2492 /* The cache tells us that it's impossible to synthesize
2493 multiplication by T within alg_hash[hash_index].cost. */
2494 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2495 /* COST_LIMIT is at least as restrictive as the one
2496 recorded in the hash table, in which case we have no
2497 hope of synthesizing a multiplication. Just
2498 return. */
2499 return;
2501 /* If we get here, COST_LIMIT is less restrictive than the
2502 one recorded in the hash table, so we may be able to
2503 synthesize a multiplication. Proceed as if we didn't
2504 have the cache entry. */
2506 else
2508 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2509 /* The cached algorithm shows that this multiplication
2510 requires more cost than COST_LIMIT. Just return. This
2511 way, we don't clobber this cache entry with
2512 alg_impossible but retain useful information. */
2513 return;
2515 cache_hit = true;
2517 switch (cache_alg)
2519 case alg_shift:
2520 goto do_alg_shift;
2522 case alg_add_t_m2:
2523 case alg_sub_t_m2:
2524 goto do_alg_addsub_t_m2;
2526 case alg_add_factor:
2527 case alg_sub_factor:
2528 goto do_alg_addsub_factor;
2530 case alg_add_t2_m:
2531 goto do_alg_add_t2_m;
2533 case alg_sub_t2_m:
2534 goto do_alg_sub_t2_m;
2536 default:
2537 gcc_unreachable ();
2542 /* If we have a group of zero bits at the low-order part of T, try
2543 multiplying by the remaining bits and then doing a shift. */
2545 if ((t & 1) == 0)
2547 do_alg_shift:
2548 m = floor_log2 (t & -t); /* m = number of low zero bits */
2549 if (m < maxm)
2551 q = t >> m;
2552 /* The function expand_shift will choose between a shift and
2553 a sequence of additions, so the observed cost is given as
2554 MIN (m * add_cost[mode], shift_cost[mode][m]). */
2555 op_cost = m * add_cost[mode];
2556 if (shift_cost[mode][m] < op_cost)
2557 op_cost = shift_cost[mode][m];
2558 new_limit.cost = best_cost.cost - op_cost;
2559 new_limit.latency = best_cost.latency - op_cost;
2560 synth_mult (alg_in, q, &new_limit, mode);
2562 alg_in->cost.cost += op_cost;
2563 alg_in->cost.latency += op_cost;
2564 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2566 struct algorithm *x;
2567 best_cost = alg_in->cost;
2568 x = alg_in, alg_in = best_alg, best_alg = x;
2569 best_alg->log[best_alg->ops] = m;
2570 best_alg->op[best_alg->ops] = alg_shift;
2573 if (cache_hit)
2574 goto done;
2577 /* If we have an odd number, add or subtract one. */
2578 if ((t & 1) != 0)
2580 unsigned HOST_WIDE_INT w;
2582 do_alg_addsub_t_m2:
2583 for (w = 1; (w & t) != 0; w <<= 1)
2585 /* If T was -1, then W will be zero after the loop. This is another
2586 case where T ends with ...111. Handling this with (T + 1) and
2587 subtracting 1 produces slightly better code and results in algorithm
2588 selection much faster than treating it like the ...0111 case
2589 below. */
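/* For example, t = 23 (...10111) ends in three ones, so we try
   24*x - x; t = 9 (...1001) ends in ...01, so we try 8*x + x.  */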
2590 if (w == 0
2591 || (w > 2
2592 /* Reject the case where t is 3.
2593 Thus we prefer addition in that case. */
2594 && t != 3))
2596 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2598 op_cost = add_cost[mode];
2599 new_limit.cost = best_cost.cost - op_cost;
2600 new_limit.latency = best_cost.latency - op_cost;
2601 synth_mult (alg_in, t + 1, &new_limit, mode);
2603 alg_in->cost.cost += op_cost;
2604 alg_in->cost.latency += op_cost;
2605 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2607 struct algorithm *x;
2608 best_cost = alg_in->cost;
2609 x = alg_in, alg_in = best_alg, best_alg = x;
2610 best_alg->log[best_alg->ops] = 0;
2611 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2614 else
2616 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2618 op_cost = add_cost[mode];
2619 new_limit.cost = best_cost.cost - op_cost;
2620 new_limit.latency = best_cost.latency - op_cost;
2621 synth_mult (alg_in, t - 1, &new_limit, mode);
2623 alg_in->cost.cost += op_cost;
2624 alg_in->cost.latency += op_cost;
2625 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2627 struct algorithm *x;
2628 best_cost = alg_in->cost;
2629 x = alg_in, alg_in = best_alg, best_alg = x;
2630 best_alg->log[best_alg->ops] = 0;
2631 best_alg->op[best_alg->ops] = alg_add_t_m2;
2634 if (cache_hit)
2635 goto done;
2638 /* Look for factors of t of the form
2639 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2640 If we find such a factor, we can multiply by t using an algorithm that
2641 multiplies by q, shift the result by m and add/subtract it to itself.
2643 We search for large factors first and loop down, even if large factors
2644 are less probable than small; if we find a large factor we will find a
2645 good sequence quickly, and therefore be able to prune (by decreasing
2646 COST_LIMIT) the search. */
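/* For example (cost limits permitting), for t = 15 the loop finds
   d = 5 (m = 2, 2**2 + 1), recurses on 15/5 = 3, and records
   alg_add_factor with log 2:  15*x = (3*x)*4 + 3*x.  */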
2648 do_alg_addsub_factor:
2649 for (m = floor_log2 (t - 1); m >= 2; m--)
2651 unsigned HOST_WIDE_INT d;
2653 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2654 if (t % d == 0 && t > d && m < maxm
2655 && (!cache_hit || cache_alg == alg_add_factor))
2657 /* If the target has a cheap shift-and-add instruction use
2658 that in preference to a shift insn followed by an add insn.
2659 Assume that the shift-and-add is "atomic" with a latency
2660 equal to its cost, otherwise assume that on superscalar
2661 hardware the shift may be executed concurrently with the
2662 earlier steps in the algorithm. */
2663 op_cost = add_cost[mode] + shift_cost[mode][m];
2664 if (shiftadd_cost[mode][m] < op_cost)
2666 op_cost = shiftadd_cost[mode][m];
2667 op_latency = op_cost;
2669 else
2670 op_latency = add_cost[mode];
2672 new_limit.cost = best_cost.cost - op_cost;
2673 new_limit.latency = best_cost.latency - op_latency;
2674 synth_mult (alg_in, t / d, &new_limit, mode);
2676 alg_in->cost.cost += op_cost;
2677 alg_in->cost.latency += op_latency;
2678 if (alg_in->cost.latency < op_cost)
2679 alg_in->cost.latency = op_cost;
2680 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2682 struct algorithm *x;
2683 best_cost = alg_in->cost;
2684 x = alg_in, alg_in = best_alg, best_alg = x;
2685 best_alg->log[best_alg->ops] = m;
2686 best_alg->op[best_alg->ops] = alg_add_factor;
2688 /* Other factors will have been taken care of in the recursion. */
2689 break;
2692 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2693 if (t % d == 0 && t > d && m < maxm
2694 && (!cache_hit || cache_alg == alg_sub_factor))
2696 /* If the target has a cheap shift-and-subtract insn use
2697 that in preference to a shift insn followed by a sub insn.
2698 Assume that the shift-and-sub is "atomic" with a latency
2699 equal to its cost, otherwise assume that on superscalar
2700 hardware the shift may be executed concurrently with the
2701 earlier steps in the algorithm. */
2702 op_cost = add_cost[mode] + shift_cost[mode][m];
2703 if (shiftsub_cost[mode][m] < op_cost)
2705 op_cost = shiftsub_cost[mode][m];
2706 op_latency = op_cost;
2708 else
2709 op_latency = add_cost[mode];
2711 new_limit.cost = best_cost.cost - op_cost;
2712 new_limit.latency = best_cost.latency - op_latency;
2713 synth_mult (alg_in, t / d, &new_limit, mode);
2715 alg_in->cost.cost += op_cost;
2716 alg_in->cost.latency += op_latency;
2717 if (alg_in->cost.latency < op_cost)
2718 alg_in->cost.latency = op_cost;
2719 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2721 struct algorithm *x;
2722 best_cost = alg_in->cost;
2723 x = alg_in, alg_in = best_alg, best_alg = x;
2724 best_alg->log[best_alg->ops] = m;
2725 best_alg->op[best_alg->ops] = alg_sub_factor;
2727 break;
2730 if (cache_hit)
2731 goto done;
2733 /* Try shift-and-add (load effective address) instructions,
2734 i.e. do a*3, a*5, a*9. */
2735 if ((t & 1) != 0)
2737 do_alg_add_t2_m:
2738 q = t - 1;
2739 q = q & -q;
2740 m = exact_log2 (q);
2741 if (m >= 0 && m < maxm)
2743 op_cost = shiftadd_cost[mode][m];
2744 new_limit.cost = best_cost.cost - op_cost;
2745 new_limit.latency = best_cost.latency - op_cost;
2746 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2748 alg_in->cost.cost += op_cost;
2749 alg_in->cost.latency += op_cost;
2750 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2752 struct algorithm *x;
2753 best_cost = alg_in->cost;
2754 x = alg_in, alg_in = best_alg, best_alg = x;
2755 best_alg->log[best_alg->ops] = m;
2756 best_alg->op[best_alg->ops] = alg_add_t2_m;
2759 if (cache_hit)
2760 goto done;
2762 do_alg_sub_t2_m:
2763 q = t + 1;
2764 q = q & -q;
2765 m = exact_log2 (q);
2766 if (m >= 0 && m < maxm)
2768 op_cost = shiftsub_cost[mode][m];
2769 new_limit.cost = best_cost.cost - op_cost;
2770 new_limit.latency = best_cost.latency - op_cost;
2771 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2773 alg_in->cost.cost += op_cost;
2774 alg_in->cost.latency += op_cost;
2775 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2777 struct algorithm *x;
2778 best_cost = alg_in->cost;
2779 x = alg_in, alg_in = best_alg, best_alg = x;
2780 best_alg->log[best_alg->ops] = m;
2781 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2784 if (cache_hit)
2785 goto done;
2788 done:
2789 /* If best_cost has not decreased, we have not found any algorithm. */
2790 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2792 /* We failed to find an algorithm. Record alg_impossible for
2793 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2794 we are asked to find an algorithm for T within the same or
2795 lower COST_LIMIT, we can immediately return to the
2796 caller. */
2797 alg_hash[hash_index].t = t;
2798 alg_hash[hash_index].mode = mode;
2799 alg_hash[hash_index].alg = alg_impossible;
2800 alg_hash[hash_index].cost = *cost_limit;
2801 return;
2804 /* Cache the result. */
2805 if (!cache_hit)
2807 alg_hash[hash_index].t = t;
2808 alg_hash[hash_index].mode = mode;
2809 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2810 alg_hash[hash_index].cost.cost = best_cost.cost;
2811 alg_hash[hash_index].cost.latency = best_cost.latency;
2814 /* If we are getting a too long sequence for `struct algorithm'
2815 to record, make this search fail. */
2816 if (best_alg->ops == MAX_BITS_PER_WORD)
2817 return;
2819 /* Copy the algorithm from temporary space to the space at alg_out.
2820 We avoid using structure assignment because the majority of
2821 best_alg is normally undefined, and this is a critical function. */
2822 alg_out->ops = best_alg->ops + 1;
2823 alg_out->cost = best_cost;
2824 memcpy (alg_out->op, best_alg->op,
2825 alg_out->ops * sizeof *alg_out->op);
2826 memcpy (alg_out->log, best_alg->log,
2827 alg_out->ops * sizeof *alg_out->log);
2830 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2831 Try three variations:
2833 - a shift/add sequence based on VAL itself
2834 - a shift/add sequence based on -VAL, followed by a negation
2835 - a shift/add sequence based on VAL - 1, followed by an addition.
2837 Return true if the cheapest of these cost less than MULT_COST,
2838 describing the algorithm in *ALG and final fixup in *VARIANT. */
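/* E.g. for VAL = 7 the basic variant might be x*8 - x; for VAL = -7 the
   cheapest choice is often that same x*8 - x sequence followed by a
   negation (negate_variant); and the VAL - 1 variant would synthesize x*6
   and then add x (add_variant).  Which of these wins depends on the
   target's add, shift and negate costs.  */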
2840 static bool
2841 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2842 struct algorithm *alg, enum mult_variant *variant,
2843 int mult_cost)
2845 struct algorithm alg2;
2846 struct mult_cost limit;
2847 int op_cost;
2849 /* Fail quickly for impossible bounds. */
2850 if (mult_cost < 0)
2851 return false;
2853 /* Ensure that mult_cost provides a reasonable upper bound.
2854 Any constant multiplication can be performed with less
2855 than 2 * bits additions. */
2856 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[mode];
2857 if (mult_cost > op_cost)
2858 mult_cost = op_cost;
2860 *variant = basic_variant;
2861 limit.cost = mult_cost;
2862 limit.latency = mult_cost;
2863 synth_mult (alg, val, &limit, mode);
2865 /* This works only if the inverted value actually fits in an
2866 `unsigned int' */
2867 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2869 op_cost = neg_cost[mode];
2870 if (MULT_COST_LESS (&alg->cost, mult_cost))
2872 limit.cost = alg->cost.cost - op_cost;
2873 limit.latency = alg->cost.latency - op_cost;
2875 else
2877 limit.cost = mult_cost - op_cost;
2878 limit.latency = mult_cost - op_cost;
2881 synth_mult (&alg2, -val, &limit, mode);
2882 alg2.cost.cost += op_cost;
2883 alg2.cost.latency += op_cost;
2884 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2885 *alg = alg2, *variant = negate_variant;
2888 /* This proves very useful for division-by-constant. */
2889 op_cost = add_cost[mode];
2890 if (MULT_COST_LESS (&alg->cost, mult_cost))
2892 limit.cost = alg->cost.cost - op_cost;
2893 limit.latency = alg->cost.latency - op_cost;
2895 else
2897 limit.cost = mult_cost - op_cost;
2898 limit.latency = mult_cost - op_cost;
2901 synth_mult (&alg2, val - 1, &limit, mode);
2902 alg2.cost.cost += op_cost;
2903 alg2.cost.latency += op_cost;
2904 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2905 *alg = alg2, *variant = add_variant;
2907 return MULT_COST_LESS (&alg->cost, mult_cost);
2910 /* A subroutine of expand_mult, used for constant multiplications.
2911 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2912 convenient. Use the shift/add sequence described by ALG and apply
2913 the final fixup specified by VARIANT. */
2915 static rtx
2916 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2917 rtx target, const struct algorithm *alg,
2918 enum mult_variant variant)
2920 HOST_WIDE_INT val_so_far;
2921 rtx insn, accum, tem;
2922 int opno;
2923 enum machine_mode nmode;
2925 /* Avoid referencing memory over and over.
2926 For speed, but also for correctness when mem is volatile. */
2927 if (MEM_P (op0))
2928 op0 = force_reg (mode, op0);
2930 /* ACCUM starts out either as OP0 or as a zero, depending on
2931 the first operation. */
2933 if (alg->op[0] == alg_zero)
2935 accum = copy_to_mode_reg (mode, const0_rtx);
2936 val_so_far = 0;
2938 else if (alg->op[0] == alg_m)
2940 accum = copy_to_mode_reg (mode, op0);
2941 val_so_far = 1;
2943 else
2944 gcc_unreachable ();
2946 for (opno = 1; opno < alg->ops; opno++)
2948 int log = alg->log[opno];
2949 rtx shift_subtarget = optimize ? 0 : accum;
2950 rtx add_target
2951 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2952 && !optimize)
2953 ? target : 0;
2954 rtx accum_target = optimize ? 0 : accum;
2956 switch (alg->op[opno])
2958 case alg_shift:
2959 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2960 build_int_cst (NULL_TREE, log),
2961 NULL_RTX, 0);
2962 val_so_far <<= log;
2963 break;
2965 case alg_add_t_m2:
2966 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2967 build_int_cst (NULL_TREE, log),
2968 NULL_RTX, 0);
2969 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2970 add_target ? add_target : accum_target);
2971 val_so_far += (HOST_WIDE_INT) 1 << log;
2972 break;
2974 case alg_sub_t_m2:
2975 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2976 build_int_cst (NULL_TREE, log),
2977 NULL_RTX, 0);
2978 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2979 add_target ? add_target : accum_target);
2980 val_so_far -= (HOST_WIDE_INT) 1 << log;
2981 break;
2983 case alg_add_t2_m:
2984 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2985 build_int_cst (NULL_TREE, log),
2986 shift_subtarget,
2988 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2989 add_target ? add_target : accum_target);
2990 val_so_far = (val_so_far << log) + 1;
2991 break;
2993 case alg_sub_t2_m:
2994 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2995 build_int_cst (NULL_TREE, log),
2996 shift_subtarget, 0);
2997 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2998 add_target ? add_target : accum_target);
2999 val_so_far = (val_so_far << log) - 1;
3000 break;
3002 case alg_add_factor:
3003 tem = expand_shift (LSHIFT_EXPR, mode, accum,
3004 build_int_cst (NULL_TREE, log),
3005 NULL_RTX, 0);
3006 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3007 add_target ? add_target : accum_target);
3008 val_so_far += val_so_far << log;
3009 break;
3011 case alg_sub_factor:
3012 tem = expand_shift (LSHIFT_EXPR, mode, accum,
3013 build_int_cst (NULL_TREE, log),
3014 NULL_RTX, 0);
3015 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
3016 (add_target
3017 ? add_target : (optimize ? 0 : tem)));
3018 val_so_far = (val_so_far << log) - val_so_far;
3019 break;
3021 default:
3022 gcc_unreachable ();
3025 /* Write a REG_EQUAL note on the last insn so that we can cse
3026 multiplication sequences. Note that if ACCUM is a SUBREG,
3027 we've set the inner register and must properly indicate
3028 that. */
3030 tem = op0, nmode = mode;
3031 if (GET_CODE (accum) == SUBREG)
3033 nmode = GET_MODE (SUBREG_REG (accum));
3034 tem = gen_lowpart (nmode, op0);
3037 insn = get_last_insn ();
3038 set_unique_reg_note (insn, REG_EQUAL,
3039 gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)));
3042 if (variant == negate_variant)
3044 val_so_far = -val_so_far;
3045 accum = expand_unop (mode, neg_optab, accum, target, 0);
3047 else if (variant == add_variant)
3049 val_so_far = val_so_far + 1;
3050 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3053 /* Compare only the bits of val and val_so_far that are significant
3054 in the result mode, to avoid sign-/zero-extension confusion. */
3055 val &= GET_MODE_MASK (mode);
3056 val_so_far &= GET_MODE_MASK (mode);
3057 gcc_assert (val == val_so_far);
3059 return accum;
3062 /* Perform a multiplication and return an rtx for the result.
3063 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3064 TARGET is a suggestion for where to store the result (an rtx).
3066 We check specially for a constant integer as OP1.
3067 If you want this check for OP0 as well, then before calling
3068 you should swap the two operands if OP0 would be constant. */
3071 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3072 int unsignedp)
3074 enum mult_variant variant;
3075 struct algorithm algorithm;
3076 int max_cost;
3078 /* Handling const0_rtx here allows us to use zero as a rogue value for
3079 coeff below. */
3080 if (op1 == const0_rtx)
3081 return const0_rtx;
3082 if (op1 == const1_rtx)
3083 return op0;
3084 if (op1 == constm1_rtx)
3085 return expand_unop (mode,
3086 GET_MODE_CLASS (mode) == MODE_INT
3087 && !unsignedp && flag_trapv
3088 ? negv_optab : neg_optab,
3089 op0, target, 0);
3091 /* These are the operations that are potentially turned into a sequence
3092 of shifts and additions. */
3093 if (SCALAR_INT_MODE_P (mode)
3094 && (unsignedp || !flag_trapv))
3096 HOST_WIDE_INT coeff = 0;
3097 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3099 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3100 less than or equal in size to `unsigned int' this doesn't matter.
3101 If the mode is larger than `unsigned int', then synth_mult works
3102 only if the constant value exactly fits in an `unsigned int' without
3103 any truncation. This means that multiplying by negative values does
3104 not work; results are off by 2^32 on a 32 bit machine. */
3106 if (GET_CODE (op1) == CONST_INT)
3108 /* Attempt to handle multiplication of DImode values by negative
3109 coefficients, by performing the multiplication by a positive
3110 multiplier and then inverting the result. */
3111 if (INTVAL (op1) < 0
3112 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3114 /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3115 result is interpreted as an unsigned coefficient.
3116 Exclude cost of op0 from max_cost to match the cost
3117 calculation of the synth_mult. */
3118 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET)
3119 - neg_cost[mode];
3120 if (max_cost > 0
3121 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3122 &variant, max_cost))
3124 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3125 NULL_RTX, &algorithm,
3126 variant);
3127 return expand_unop (mode, neg_optab, temp, target, 0);
3130 else coeff = INTVAL (op1);
3132 else if (GET_CODE (op1) == CONST_DOUBLE)
3134 /* If we are multiplying in DImode, it may still be a win
3135 to try to work with shifts and adds. */
3136 if (CONST_DOUBLE_HIGH (op1) == 0)
3137 coeff = CONST_DOUBLE_LOW (op1);
3138 else if (CONST_DOUBLE_LOW (op1) == 0
3139 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3141 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3142 + HOST_BITS_PER_WIDE_INT;
3143 return expand_shift (LSHIFT_EXPR, mode, op0,
3144 build_int_cst (NULL_TREE, shift),
3145 target, unsignedp);
3149 /* We used to test optimize here, on the grounds that it's better to
3150 produce a smaller program when -O is not used. But this causes
3151 such a terrible slowdown sometimes that it seems better to always
3152 use synth_mult. */
3153 if (coeff != 0)
3155 /* Special case powers of two. */
3156 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3157 return expand_shift (LSHIFT_EXPR, mode, op0,
3158 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3159 target, unsignedp);
3161 /* Exclude cost of op0 from max_cost to match the cost
3162 calculation of the synth_mult. */
3163 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET);
3164 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3165 max_cost))
3166 return expand_mult_const (mode, op0, coeff, target,
3167 &algorithm, variant);
3171 if (GET_CODE (op0) == CONST_DOUBLE)
3173 rtx temp = op0;
3174 op0 = op1;
3175 op1 = temp;
3178 /* Expand x*2.0 as x+x. */
3179 if (GET_CODE (op1) == CONST_DOUBLE
3180 && SCALAR_FLOAT_MODE_P (mode))
3182 REAL_VALUE_TYPE d;
3183 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3185 if (REAL_VALUES_EQUAL (d, dconst2))
3187 op0 = force_reg (GET_MODE (op0), op0);
3188 return expand_binop (mode, add_optab, op0, op0,
3189 target, unsignedp, OPTAB_LIB_WIDEN);
3193 /* This used to use umul_optab if unsigned, but for non-widening multiply
3194 there is no difference between signed and unsigned. */
3195 op0 = expand_binop (mode,
3196 ! unsignedp
3197 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3198 ? smulv_optab : smul_optab,
3199 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3200 gcc_assert (op0);
3201 return op0;
3204 /* Return the smallest n such that 2**n >= X. */
3207 ceil_log2 (unsigned HOST_WIDE_INT x)
3209 return floor_log2 (x - 1) + 1;
3212 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3213 replace division by D, and put the least significant N bits of the result
3214 in *MULTIPLIER_PTR and return the most significant bit.
3216 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3217 needed precision is in PRECISION (should be <= N).
3219 PRECISION should be as small as possible so this function can choose
3220 the multiplier more freely.
3222 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3223 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3225 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3226 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
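/* As a concrete illustration, for D = 3 with N = PRECISION = 32 this
   chooses multiplier 0xAAAAAAAB with *POST_SHIFT_PTR = 1 and
   *LGUP_PTR = 2, returning 0 for the top bit, so that

   x / 3 == (unsigned int) (((unsigned long long) x * 0xAAAAAAABu) >> 33)

   for any 32-bit unsigned x.  */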
3228 static
3229 unsigned HOST_WIDE_INT
3230 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3231 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3233 HOST_WIDE_INT mhigh_hi, mlow_hi;
3234 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3235 int lgup, post_shift;
3236 int pow, pow2;
3237 unsigned HOST_WIDE_INT nl, dummy1;
3238 HOST_WIDE_INT nh, dummy2;
3240 /* lgup = ceil(log2(divisor)); */
3241 lgup = ceil_log2 (d);
3243 gcc_assert (lgup <= n);
3245 pow = n + lgup;
3246 pow2 = n + lgup - precision;
3248 /* We could handle this with some effort, but this case is much
3249 better handled directly with a scc insn, so rely on caller using
3250 that. */
3251 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3253 /* mlow = 2^(N + lgup)/d */
3254 if (pow >= HOST_BITS_PER_WIDE_INT)
3256 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3257 nl = 0;
3259 else
3261 nh = 0;
3262 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3264 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3265 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3267 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3268 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3269 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3270 else
3271 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3272 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3273 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3275 gcc_assert (!mhigh_hi || nh - d < d);
3276 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3277 /* Assert that mlow < mhigh. */
3278 gcc_assert (mlow_hi < mhigh_hi
3279 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3281 /* If precision == N, then mlow, mhigh exceed 2^N
3282 (but they do not exceed 2^(N+1)). */
3284 /* Reduce to lowest terms. */
3285 for (post_shift = lgup; post_shift > 0; post_shift--)
3287 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3288 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3289 if (ml_lo >= mh_lo)
3290 break;
3292 mlow_hi = 0;
3293 mlow_lo = ml_lo;
3294 mhigh_hi = 0;
3295 mhigh_lo = mh_lo;
3298 *post_shift_ptr = post_shift;
3299 *lgup_ptr = lgup;
3300 if (n < HOST_BITS_PER_WIDE_INT)
3302 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3303 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3304 return mhigh_lo >= mask;
3306 else
3308 *multiplier_ptr = GEN_INT (mhigh_lo);
3309 return mhigh_hi;
3313 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3314 congruent to 1 (mod 2**N). */
3316 static unsigned HOST_WIDE_INT
3317 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3319 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3321 /* The algorithm notes that the choice y = x satisfies
3322 x*y == 1 mod 2^3, since x is assumed odd.
3323 Each iteration doubles the number of bits of significance in y. */
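/* E.g. for x = 7, n = 8: y starts at 7 (7*7 == 49 == 1 mod 8); one
   iteration gives y = 7*(2 - 49) mod 256 = 183, and indeed
   7 * 183 == 1281 == 5*256 + 1 == 1 (mod 256).  A further iteration
   leaves y unchanged.  */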
3325 unsigned HOST_WIDE_INT mask;
3326 unsigned HOST_WIDE_INT y = x;
3327 int nbit = 3;
3329 mask = (n == HOST_BITS_PER_WIDE_INT
3330 ? ~(unsigned HOST_WIDE_INT) 0
3331 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3333 while (nbit < n)
3335 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3336 nbit *= 2;
3338 return y;
3341 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3342 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3343 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3344 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3345 become signed.
3347 The result is put in TARGET if that is convenient.
3349 MODE is the mode of operation. */
3352 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3353 rtx op1, rtx target, int unsignedp)
3355 rtx tem;
3356 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3358 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3359 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3360 NULL_RTX, 0);
3361 tem = expand_and (mode, tem, op1, NULL_RTX);
3362 adj_operand
3363 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3364 adj_operand);
3366 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3367 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3368 NULL_RTX, 0);
3369 tem = expand_and (mode, tem, op0, NULL_RTX);
3370 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3371 target);
3373 return target;
3376 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3378 static rtx
3379 extract_high_half (enum machine_mode mode, rtx op)
3381 enum machine_mode wider_mode;
3383 if (mode == word_mode)
3384 return gen_highpart (mode, op);
3386 wider_mode = GET_MODE_WIDER_MODE (mode);
3387 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3388 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
3389 return convert_modes (mode, wider_mode, op, 0);
3392 /* Like expand_mult_highpart, but only consider using a multiplication
3393 optab. OP1 is an rtx for the constant operand. */
3395 static rtx
3396 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3397 rtx target, int unsignedp, int max_cost)
3399 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3400 enum machine_mode wider_mode;
3401 optab moptab;
3402 rtx tem;
3403 int size;
3405 wider_mode = GET_MODE_WIDER_MODE (mode);
3406 size = GET_MODE_BITSIZE (mode);
3408 /* Firstly, try using a multiplication insn that only generates the needed
3409 high part of the product, and in the sign flavor of unsignedp. */
3410 if (mul_highpart_cost[mode] < max_cost)
3412 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3413 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3414 unsignedp, OPTAB_DIRECT);
3415 if (tem)
3416 return tem;
3419 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3420 Need to adjust the result after the multiplication. */
3421 if (size - 1 < BITS_PER_WORD
3422 && (mul_highpart_cost[mode] + 2 * shift_cost[mode][size-1]
3423 + 4 * add_cost[mode] < max_cost))
3425 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3426 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3427 unsignedp, OPTAB_DIRECT);
3428 if (tem)
3429 /* We used the wrong signedness. Adjust the result. */
3430 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3431 tem, unsignedp);
3434 /* Try widening multiplication. */
3435 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3436 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3437 && mul_widen_cost[wider_mode] < max_cost)
3439 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3440 unsignedp, OPTAB_WIDEN);
3441 if (tem)
3442 return extract_high_half (mode, tem);
3445 /* Try widening the mode and perform a non-widening multiplication. */
3446 if (smul_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3447 && size - 1 < BITS_PER_WORD
3448 && mul_cost[wider_mode] + shift_cost[mode][size-1] < max_cost)
3450 rtx insns, wop0, wop1;
3452 /* We need to widen the operands, for example to ensure the
3453 constant multiplier is correctly sign or zero extended.
3454 Use a sequence to clean up any instructions emitted by
3455 the conversions if things don't work out. */
3456 start_sequence ();
3457 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3458 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3459 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3460 unsignedp, OPTAB_WIDEN);
3461 insns = get_insns ();
3462 end_sequence ();
3464 if (tem)
3466 emit_insn (insns);
3467 return extract_high_half (mode, tem);
3471 /* Try widening multiplication of opposite signedness, and adjust. */
3472 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3473 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3474 && size - 1 < BITS_PER_WORD
3475 && (mul_widen_cost[wider_mode] + 2 * shift_cost[mode][size-1]
3476 + 4 * add_cost[mode] < max_cost))
3478 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3479 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3480 if (tem != 0)
3482 tem = extract_high_half (mode, tem);
3483 /* We used the wrong signedness. Adjust the result. */
3484 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3485 target, unsignedp);
3489 return 0;
3492 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3493 putting the high half of the result in TARGET if that is convenient,
3494 and return where the result is. If the operation can not be performed,
3495 0 is returned.
3497 MODE is the mode of operation and result.
3499 UNSIGNEDP nonzero means unsigned multiply.
3501 MAX_COST is the total allowed cost for the expanded RTL. */
3503 static rtx
3504 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3505 rtx target, int unsignedp, int max_cost)
3507 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3508 unsigned HOST_WIDE_INT cnst1;
3509 int extra_cost;
3510 bool sign_adjust = false;
3511 enum mult_variant variant;
3512 struct algorithm alg;
3513 rtx tem;
3515 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3516 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3518 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3520 /* We can't optimize modes wider than BITS_PER_WORD.
3521 ??? We might be able to perform double-word arithmetic if
3522 mode == word_mode, however all the cost calculations in
3523 synth_mult etc. assume single-word operations. */
3524 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3525 return expand_mult_highpart_optab (mode, op0, op1, target,
3526 unsignedp, max_cost);
3528 extra_cost = shift_cost[mode][GET_MODE_BITSIZE (mode) - 1];
3530 /* Check whether we try to multiply by a negative constant. */
3531 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3533 sign_adjust = true;
3534 extra_cost += add_cost[mode];
3537 /* See whether shift/add multiplication is cheap enough. */
3538 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3539 max_cost - extra_cost))
3541 /* See whether the specialized multiplication optabs are
3542 cheaper than the shift/add version. */
3543 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3544 alg.cost.cost + extra_cost);
3545 if (tem)
3546 return tem;
3548 tem = convert_to_mode (wider_mode, op0, unsignedp);
3549 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3550 tem = extract_high_half (mode, tem);
3552 /* Adjust result for signedness. */
3553 if (sign_adjust)
3554 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3556 return tem;
3558 return expand_mult_highpart_optab (mode, op0, op1, target,
3559 unsignedp, max_cost);
3563 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3565 static rtx
3566 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3568 unsigned HOST_WIDE_INT masklow, maskhigh;
3569 rtx result, temp, shift, label;
3570 int logd;
3572 logd = floor_log2 (d);
3573 result = gen_reg_rtx (mode);
3575 /* Avoid conditional branches when they're expensive. */
3576 if (BRANCH_COST >= 2
3577 && !optimize_size)
3579 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3580 mode, 0, -1);
3581 if (signmask)
3583 signmask = force_reg (mode, signmask);
3584 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3585 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3587 /* Use the rtx_cost of an LSHIFTRT instruction to determine
3588 which instruction sequence to use. If logical right shifts
3589 are expensive then use 2 XORs, 2 SUBs and an AND; otherwise
3590 use an LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3592 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3593 if (lshr_optab->handlers[mode].insn_code == CODE_FOR_nothing
3594 || rtx_cost (temp, SET) > COSTS_N_INSNS (2))
3596 temp = expand_binop (mode, xor_optab, op0, signmask,
3597 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3598 temp = expand_binop (mode, sub_optab, temp, signmask,
3599 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3600 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3601 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3602 temp = expand_binop (mode, xor_optab, temp, signmask,
3603 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3604 temp = expand_binop (mode, sub_optab, temp, signmask,
3605 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3607 else
3609 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3610 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3611 signmask = force_reg (mode, signmask);
3613 temp = expand_binop (mode, add_optab, op0, signmask,
3614 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3615 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3616 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3617 temp = expand_binop (mode, sub_optab, temp, signmask,
3618 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3620 return temp;
3624 /* Mask contains the mode's signbit and the significant bits of the
3625 modulus. By including the signbit in the operation, many targets
3626 can avoid an explicit compare operation in the following comparison
3627 against zero. */
3629 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3630 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3632 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3633 maskhigh = -1;
3635 else
3636 maskhigh = (HOST_WIDE_INT) -1
3637 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3639 temp = expand_binop (mode, and_optab, op0,
3640 immed_double_const (masklow, maskhigh, mode),
3641 result, 1, OPTAB_LIB_WIDEN);
3642 if (temp != result)
3643 emit_move_insn (result, temp);
3645 label = gen_label_rtx ();
3646 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3648 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3649 0, OPTAB_LIB_WIDEN);
3650 masklow = (HOST_WIDE_INT) -1 << logd;
3651 maskhigh = -1;
3652 temp = expand_binop (mode, ior_optab, temp,
3653 immed_double_const (masklow, maskhigh, mode),
3654 result, 1, OPTAB_LIB_WIDEN);
3655 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3656 0, OPTAB_LIB_WIDEN);
3657 if (temp != result)
3658 emit_move_insn (result, temp);
3659 emit_label (label);
3660 return result;
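/* Illustration only, not part of the compiler: the two branch-free
   sequences chosen above, written out in plain C.  The sketch assumes a
   32-bit int, two's-complement wraparound and an arithmetic right shift of
   negative values, with 1 <= logd <= 31; the function names are made up.  */
#if 0
static int
example_smod_pow2_shift (int x, int logd)   /* LSHIFTRT, ADD, AND, SUB form */
{
  unsigned int mask = ((unsigned int) 1 << logd) - 1;
  unsigned int bias = (unsigned int) (x >> 31) >> (32 - logd); /* d-1 or 0 */
  return (int) ((((unsigned int) x + bias) & mask) - bias);
}

static int
example_smod_pow2_xor (int x, int logd)     /* XOR, SUB, AND, XOR, SUB form */
{
  unsigned int mask = ((unsigned int) 1 << logd) - 1;
  unsigned int sign = (unsigned int) (x >> 31);   /* all ones or zero */
  unsigned int t = (((unsigned int) x ^ sign) - sign) & mask;
  return (int) ((t ^ sign) - sign);
}
#endif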
3663 /* Expand signed division of OP0 by a power of two D in mode MODE.
3664 This routine is only called for positive values of D. */
3666 static rtx
3667 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3669 rtx temp, label;
3670 tree shift;
3671 int logd;
3673 logd = floor_log2 (d);
3674 shift = build_int_cst (NULL_TREE, logd);
3676 if (d == 2 && BRANCH_COST >= 1)
3678 temp = gen_reg_rtx (mode);
3679 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3680 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3681 0, OPTAB_LIB_WIDEN);
3682 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3685 #ifdef HAVE_conditional_move
3686 if (BRANCH_COST >= 2)
3688 rtx temp2;
3690 /* ??? emit_conditional_move forces a stack adjustment via
3691 compare_from_rtx so, if the sequence is discarded, it will
3692 be lost. Do it now instead. */
3693 do_pending_stack_adjust ();
3695 start_sequence ();
3696 temp2 = copy_to_mode_reg (mode, op0);
3697 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3698 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3699 temp = force_reg (mode, temp);
3701 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3702 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3703 mode, temp, temp2, mode, 0);
3704 if (temp2)
3706 rtx seq = get_insns ();
3707 end_sequence ();
3708 emit_insn (seq);
3709 return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
3711 end_sequence ();
3713 #endif
3715 if (BRANCH_COST >= 2)
3717 int ushift = GET_MODE_BITSIZE (mode) - logd;
3719 temp = gen_reg_rtx (mode);
3720 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3721 if (shift_cost[mode][ushift] > COSTS_N_INSNS (1))
3722 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3723 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3724 else
3725 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3726 build_int_cst (NULL_TREE, ushift),
3727 NULL_RTX, 1);
3728 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3729 0, OPTAB_LIB_WIDEN);
3730 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3733 label = gen_label_rtx ();
3734 temp = copy_to_mode_reg (mode, op0);
3735 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3736 expand_inc (temp, GEN_INT (d - 1));
3737 emit_label (label);
3738 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
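/* Illustration only, not part of the compiler: the branch-free BRANCH_COST
   path above in plain C.  The sketch assumes a 32-bit int, two's-complement
   wraparound and an arithmetic right shift of negative values, with
   1 <= logd <= 31; the function name is made up.  */
#if 0
static int
example_sdiv_pow2 (int x, int logd)
{
  /* Add d-1 to negative dividends so the final arithmetic shift truncates
     toward zero rather than toward minus infinity.  */
  unsigned int bias = (unsigned int) (x >> 31) >> (32 - logd);
  return (int) ((unsigned int) x + bias) >> logd;
}
#endif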
3741 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3742 if that is convenient, and returning where the result is.
3743 You may request either the quotient or the remainder as the result;
3744 specify REM_FLAG nonzero to get the remainder.
3746 CODE is the expression code for which kind of division this is;
3747 it controls how rounding is done. MODE is the machine mode to use.
3748 UNSIGNEDP nonzero means do unsigned division. */
3750 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3751 and then correct it by or'ing in missing high bits
3752 if result of ANDI is nonzero.
3753 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3754 This could optimize to a bfexts instruction.
3755 But C doesn't use these operations, so their optimizations are
3756 left for later. */
3757 /* ??? For modulo, we don't actually need the highpart of the first product,
3758 the low part will do nicely. And for small divisors, the second multiply
3759 can also be a low-part only multiply or even be completely left out.
3760 E.g. to calculate the remainder of a division by 3 with a 32 bit
3761 multiply, multiply with 0x55555556 and extract the upper two bits;
3762 the result is exact for inputs up to 0x1fffffff.
3763 The input range can be reduced by using cross-sum rules.
3764 For odd divisors >= 3, the following table gives right shift counts
3765 so that if a number is shifted by an integer multiple of the given
3766 amount, the remainder stays the same:
3767 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3768 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3769 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3770 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3771 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3773 Cross-sum rules for even numbers can be derived by leaving as many bits
3774 to the right alone as the divisor has zeros to the right.
3775 E.g. if x is an unsigned 32 bit number:
3776 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28  */
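/* Illustration only, not part of the compiler: the remainder-by-3 recipe
   from the comment above, in plain C on a 32-bit unsigned int.  The
   multiplier 0x55555556 and the claim that the result is exact for inputs
   up to 0x1fffffff are taken from that comment; the function name is made
   up.  */
#if 0
static unsigned int
example_umod3 (unsigned int x)          /* x <= 0x1fffffff */
{
  /* The low 32 bits of x * 0x55555556 carry x mod 3, scaled by 2**32 / 3,
     in their top two bits.  */
  return (x * 0x55555556u) >> 30;
}
#endif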
rtx
3780 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3781 rtx op0, rtx op1, rtx target, int unsignedp)
3783 enum machine_mode compute_mode;
3784 rtx tquotient;
3785 rtx quotient = 0, remainder = 0;
3786 rtx last;
3787 int size;
3788 rtx insn, set;
3789 optab optab1, optab2;
3790 int op1_is_constant, op1_is_pow2 = 0;
3791 int max_cost, extra_cost;
3792 static HOST_WIDE_INT last_div_const = 0;
3793 static HOST_WIDE_INT ext_op1;
3795 op1_is_constant = GET_CODE (op1) == CONST_INT;
3796 if (op1_is_constant)
3798 ext_op1 = INTVAL (op1);
3799 if (unsignedp)
3800 ext_op1 &= GET_MODE_MASK (mode);
3801 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3802 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3806 /* This is the structure of expand_divmod:
3808 First comes code to fix up the operands so we can perform the operations
3809 correctly and efficiently.
3811 Second comes a switch statement with code specific for each rounding mode.
3812 For some special operands this code emits all RTL for the desired
3813 operation, for other cases, it generates only a quotient and stores it in
3814 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3815 to indicate that it has not done anything.
3817 Last comes code that finishes the operation. If QUOTIENT is set and
3818 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3819 QUOTIENT is not set, it is computed using trunc rounding.
3821 We try to generate special code for division and remainder when OP1 is a
3822 constant. If |OP1| = 2**n we can use shifts and some other fast
3823 operations. For other values of OP1, we compute a carefully selected
3824 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3825 by m.
3827 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3828 half of the product. Different strategies for generating the product are
3829 implemented in expand_mult_highpart.
3831 If what we actually want is the remainder, we generate that by another
3832 by-constant multiplication and a subtraction. */
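#if 0
  /* Illustration only, not part of the compiler: the multiply-by-1/OP1
     strategy just described, instantiated for an unsigned 32-bit division
     by 3.  The multiplier 0xAAAAAAAB is ceil(2**33 / 3) (not taken from
     this file); the quotient is the high half of the widening product
     shifted right one more bit, and the remainder then follows by a
     multiply-and-subtract, exactly as outlined above.  */
  unsigned int x = 100, q3, r3;
  q3 = (unsigned int) (((unsigned long long) x * 0xAAAAAAABu) >> 33); /* 33 */
  r3 = x - q3 * 3;                                                    /* 1  */
#endif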
3834 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3835 code below will malfunction if we are, so check here and handle
3836 the special case if so. */
3837 if (op1 == const1_rtx)
3838 return rem_flag ? const0_rtx : op0;
3840 /* When dividing by -1, we could get an overflow.
3841 negv_optab can handle overflows. */
3842 if (! unsignedp && op1 == constm1_rtx)
3844 if (rem_flag)
3845 return const0_rtx;
3846 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3847 ? negv_optab : neg_optab, op0, target, 0);
3850 if (target
3851 /* Don't use the function value register as a target
3852 since we have to read it as well as write it,
3853 and function-inlining gets confused by this. */
3854 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3855 /* Don't clobber an operand while doing a multi-step calculation. */
3856 || ((rem_flag || op1_is_constant)
3857 && (reg_mentioned_p (target, op0)
3858 || (MEM_P (op0) && MEM_P (target))))
3859 || reg_mentioned_p (target, op1)
3860 || (MEM_P (op1) && MEM_P (target))))
3861 target = 0;
3863 /* Get the mode in which to perform this computation. Normally it will
3864 be MODE, but sometimes we can't do the desired operation in MODE.
3865 If so, pick a wider mode in which we can do the operation. Convert
3866 to that mode at the start to avoid repeated conversions.
3868 First see what operations we need. These depend on the expression
3869 we are evaluating. (We assume that divxx3 insns exist under the
3870 same conditions as modxx3 insns, and that these insns don't normally
3871 fail. If these assumptions are not correct, we may generate less
3872 efficient code in some cases.)
3874 Then see if we find a mode in which we can open-code that operation
3875 (either a division, modulus, or shift). Finally, check for the smallest
3876 mode for which we can do the operation with a library call. */
3878 /* We might want to refine this now that we have division-by-constant
3879 optimization. Since expand_mult_highpart tries so many variants, it is
3880 not straightforward to generalize this. Maybe we should make an array
3881 of possible modes in init_expmed? Save this for GCC 2.7. */
3883 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3884 ? (unsignedp ? lshr_optab : ashr_optab)
3885 : (unsignedp ? udiv_optab : sdiv_optab));
3886 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3887 ? optab1
3888 : (unsignedp ? udivmod_optab : sdivmod_optab));
3890 for (compute_mode = mode; compute_mode != VOIDmode;
3891 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3892 if (optab1->handlers[compute_mode].insn_code != CODE_FOR_nothing
3893 || optab2->handlers[compute_mode].insn_code != CODE_FOR_nothing)
3894 break;
3896 if (compute_mode == VOIDmode)
3897 for (compute_mode = mode; compute_mode != VOIDmode;
3898 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3899 if (optab1->handlers[compute_mode].libfunc
3900 || optab2->handlers[compute_mode].libfunc)
3901 break;
3903 /* If we still couldn't find a mode, use MODE, but expand_binop will
3904 probably die. */
3905 if (compute_mode == VOIDmode)
3906 compute_mode = mode;
3908 if (target && GET_MODE (target) == compute_mode)
3909 tquotient = target;
3910 else
3911 tquotient = gen_reg_rtx (compute_mode);
3913 size = GET_MODE_BITSIZE (compute_mode);
3914 #if 0
3915 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3916 (mode), and thereby get better code when OP1 is a constant. Do that
3917 later. It will require going over all usages of SIZE below. */
3918 size = GET_MODE_BITSIZE (mode);
3919 #endif
3921 /* Only deduct something for a REM if the last divide done was
3922 for a different constant. Then set the constant of the last
3923 divide. */
3924 max_cost = unsignedp ? udiv_cost[compute_mode] : sdiv_cost[compute_mode];
3925 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
3926 && INTVAL (op1) == last_div_const))
3927 max_cost -= mul_cost[compute_mode] + add_cost[compute_mode];
3929 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3931 /* Now convert to the best mode to use. */
3932 if (compute_mode != mode)
3934 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3935 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3937 /* convert_modes may have placed op1 into a register, so we
3938 must recompute the following. */
3939 op1_is_constant = GET_CODE (op1) == CONST_INT;
3940 op1_is_pow2 = (op1_is_constant
3941 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3942 || (! unsignedp
3943 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
3946 /* If one of the operands is a volatile MEM, copy it into a register. */
3948 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3949 op0 = force_reg (compute_mode, op0);
3950 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3951 op1 = force_reg (compute_mode, op1);
3953 /* If we need the remainder or if OP1 is constant, we need to
3954 put OP0 in a register in case it has any queued subexpressions. */
3955 if (rem_flag || op1_is_constant)
3956 op0 = force_reg (compute_mode, op0);
3958 last = get_last_insn ();
3960 /* Promote floor rounding to trunc rounding for unsigned operations. */
3961 if (unsignedp)
3963 if (code == FLOOR_DIV_EXPR)
3964 code = TRUNC_DIV_EXPR;
3965 if (code == FLOOR_MOD_EXPR)
3966 code = TRUNC_MOD_EXPR;
3967 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3968 code = TRUNC_DIV_EXPR;
3971 if (op1 != const0_rtx)
3972 switch (code)
3974 case TRUNC_MOD_EXPR:
3975 case TRUNC_DIV_EXPR:
3976 if (op1_is_constant)
3978 if (unsignedp)
3980 unsigned HOST_WIDE_INT mh;
3981 int pre_shift, post_shift;
3982 int dummy;
3983 rtx ml;
3984 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3985 & GET_MODE_MASK (compute_mode));
3987 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3989 pre_shift = floor_log2 (d);
3990 if (rem_flag)
3992 remainder
3993 = expand_binop (compute_mode, and_optab, op0,
3994 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3995 remainder, 1,
3996 OPTAB_LIB_WIDEN);
3997 if (remainder)
3998 return gen_lowpart (mode, remainder);
4000 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4001 build_int_cst (NULL_TREE,
4002 pre_shift),
4003 tquotient, 1);
4005 else if (size <= HOST_BITS_PER_WIDE_INT)
4007 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
4009 /* Most significant bit of divisor is set; emit an scc
4010 insn. */
4011 quotient = emit_store_flag (tquotient, GEU, op0, op1,
4012 compute_mode, 1, 1);
4013 if (quotient == 0)
4014 goto fail1;
4016 else
4018 /* Find a suitable multiplier and right shift count
4019 instead of multiplying with D. */
4021 mh = choose_multiplier (d, size, size,
4022 &ml, &post_shift, &dummy);
4024 /* If the suggested multiplier is more than SIZE bits,
4025 we can do better for even divisors, using an
4026 initial right shift. */
4027 if (mh != 0 && (d & 1) == 0)
4029 pre_shift = floor_log2 (d & -d);
4030 mh = choose_multiplier (d >> pre_shift, size,
4031 size - pre_shift,
4032 &ml, &post_shift, &dummy);
4033 gcc_assert (!mh);
4035 else
4036 pre_shift = 0;
4038 if (mh != 0)
4040 rtx t1, t2, t3, t4;
4042 if (post_shift - 1 >= BITS_PER_WORD)
4043 goto fail1;
4045 extra_cost
4046 = (shift_cost[compute_mode][post_shift - 1]
4047 + shift_cost[compute_mode][1]
4048 + 2 * add_cost[compute_mode]);
4049 t1 = expand_mult_highpart (compute_mode, op0, ml,
4050 NULL_RTX, 1,
4051 max_cost - extra_cost);
4052 if (t1 == 0)
4053 goto fail1;
4054 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4055 op0, t1),
4056 NULL_RTX);
4057 t3 = expand_shift
4058 (RSHIFT_EXPR, compute_mode, t2,
4059 build_int_cst (NULL_TREE, 1),
4060 NULL_RTX,1);
4061 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4062 t1, t3),
4063 NULL_RTX);
4064 quotient = expand_shift
4065 (RSHIFT_EXPR, compute_mode, t4,
4066 build_int_cst (NULL_TREE, post_shift - 1),
4067 tquotient, 1);
4069 else
4071 rtx t1, t2;
4073 if (pre_shift >= BITS_PER_WORD
4074 || post_shift >= BITS_PER_WORD)
4075 goto fail1;
4077 t1 = expand_shift
4078 (RSHIFT_EXPR, compute_mode, op0,
4079 build_int_cst (NULL_TREE, pre_shift),
4080 NULL_RTX, 1);
4081 extra_cost
4082 = (shift_cost[compute_mode][pre_shift]
4083 + shift_cost[compute_mode][post_shift]);
4084 t2 = expand_mult_highpart (compute_mode, t1, ml,
4085 NULL_RTX, 1,
4086 max_cost - extra_cost);
4087 if (t2 == 0)
4088 goto fail1;
4089 quotient = expand_shift
4090 (RSHIFT_EXPR, compute_mode, t2,
4091 build_int_cst (NULL_TREE, post_shift),
4092 tquotient, 1);
4096 else /* Too wide mode to use tricky code */
4097 break;
4099 insn = get_last_insn ();
4100 if (insn != last
4101 && (set = single_set (insn)) != 0
4102 && SET_DEST (set) == quotient)
4103 set_unique_reg_note (insn,
4104 REG_EQUAL,
4105 gen_rtx_UDIV (compute_mode, op0, op1));
4107 else /* TRUNC_DIV, signed */
4109 unsigned HOST_WIDE_INT ml;
4110 int lgup, post_shift;
4111 rtx mlr;
4112 HOST_WIDE_INT d = INTVAL (op1);
4113 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
4115 /* n rem d = n rem -d */
4116 if (rem_flag && d < 0)
4118 d = abs_d;
4119 op1 = gen_int_mode (abs_d, compute_mode);
4122 if (d == 1)
4123 quotient = op0;
4124 else if (d == -1)
4125 quotient = expand_unop (compute_mode, neg_optab, op0,
4126 tquotient, 0);
4127 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4129 /* This case is not handled correctly below. */
4130 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4131 compute_mode, 1, 1);
4132 if (quotient == 0)
4133 goto fail1;
4135 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4136 && (rem_flag ? smod_pow2_cheap[compute_mode]
4137 : sdiv_pow2_cheap[compute_mode])
4138 /* We assume that the cheap metric is true if the
4139 optab has an expander for this mode. */
4140 && (((rem_flag ? smod_optab : sdiv_optab)
4141 ->handlers[compute_mode].insn_code
4142 != CODE_FOR_nothing)
4143 || (sdivmod_optab->handlers[compute_mode]
4144 .insn_code != CODE_FOR_nothing)))
4146 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4148 if (rem_flag)
4150 remainder = expand_smod_pow2 (compute_mode, op0, d);
4151 if (remainder)
4152 return gen_lowpart (mode, remainder);
4155 if (sdiv_pow2_cheap[compute_mode]
4156 && ((sdiv_optab->handlers[compute_mode].insn_code
4157 != CODE_FOR_nothing)
4158 || (sdivmod_optab->handlers[compute_mode].insn_code
4159 != CODE_FOR_nothing)))
4160 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4161 compute_mode, op0,
4162 gen_int_mode (abs_d,
4163 compute_mode),
4164 NULL_RTX, 0);
4165 else
4166 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4168 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4169 negate the quotient. */
4170 if (d < 0)
4172 insn = get_last_insn ();
4173 if (insn != last
4174 && (set = single_set (insn)) != 0
4175 && SET_DEST (set) == quotient
4176 && abs_d < ((unsigned HOST_WIDE_INT) 1
4177 << (HOST_BITS_PER_WIDE_INT - 1)))
4178 set_unique_reg_note (insn,
4179 REG_EQUAL,
4180 gen_rtx_DIV (compute_mode,
4181 op0,
4182 GEN_INT
4183 (trunc_int_for_mode
4184 (abs_d,
4185 compute_mode))));
4187 quotient = expand_unop (compute_mode, neg_optab,
4188 quotient, quotient, 0);
4191 else if (size <= HOST_BITS_PER_WIDE_INT)
4193 choose_multiplier (abs_d, size, size - 1,
4194 &mlr, &post_shift, &lgup);
4195 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
4196 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4198 rtx t1, t2, t3;
4200 if (post_shift >= BITS_PER_WORD
4201 || size - 1 >= BITS_PER_WORD)
4202 goto fail1;
4204 extra_cost = (shift_cost[compute_mode][post_shift]
4205 + shift_cost[compute_mode][size - 1]
4206 + add_cost[compute_mode]);
4207 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4208 NULL_RTX, 0,
4209 max_cost - extra_cost);
4210 if (t1 == 0)
4211 goto fail1;
4212 t2 = expand_shift
4213 (RSHIFT_EXPR, compute_mode, t1,
4214 build_int_cst (NULL_TREE, post_shift),
4215 NULL_RTX, 0);
4216 t3 = expand_shift
4217 (RSHIFT_EXPR, compute_mode, op0,
4218 build_int_cst (NULL_TREE, size - 1),
4219 NULL_RTX, 0);
4220 if (d < 0)
4221 quotient
4222 = force_operand (gen_rtx_MINUS (compute_mode,
4223 t3, t2),
4224 tquotient);
4225 else
4226 quotient
4227 = force_operand (gen_rtx_MINUS (compute_mode,
4228 t2, t3),
4229 tquotient);
4231 else
4233 rtx t1, t2, t3, t4;
4235 if (post_shift >= BITS_PER_WORD
4236 || size - 1 >= BITS_PER_WORD)
4237 goto fail1;
4239 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4240 mlr = gen_int_mode (ml, compute_mode);
4241 extra_cost = (shift_cost[compute_mode][post_shift]
4242 + shift_cost[compute_mode][size - 1]
4243 + 2 * add_cost[compute_mode]);
4244 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4245 NULL_RTX, 0,
4246 max_cost - extra_cost);
4247 if (t1 == 0)
4248 goto fail1;
4249 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4250 t1, op0),
4251 NULL_RTX);
4252 t3 = expand_shift
4253 (RSHIFT_EXPR, compute_mode, t2,
4254 build_int_cst (NULL_TREE, post_shift),
4255 NULL_RTX, 0);
4256 t4 = expand_shift
4257 (RSHIFT_EXPR, compute_mode, op0,
4258 build_int_cst (NULL_TREE, size - 1),
4259 NULL_RTX, 0);
4260 if (d < 0)
4261 quotient
4262 = force_operand (gen_rtx_MINUS (compute_mode,
4263 t4, t3),
4264 tquotient);
4265 else
4266 quotient
4267 = force_operand (gen_rtx_MINUS (compute_mode,
4268 t3, t4),
4269 tquotient);
4272 else /* Too wide mode to use tricky code */
4273 break;
4275 insn = get_last_insn ();
4276 if (insn != last
4277 && (set = single_set (insn)) != 0
4278 && SET_DEST (set) == quotient)
4279 set_unique_reg_note (insn,
4280 REG_EQUAL,
4281 gen_rtx_DIV (compute_mode, op0, op1));
4283 break;
4285 fail1:
4286 delete_insns_since (last);
4287 break;
4289 case FLOOR_DIV_EXPR:
4290 case FLOOR_MOD_EXPR:
4291 /* We will come here only for signed operations. */
4292 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4294 unsigned HOST_WIDE_INT mh;
4295 int pre_shift, lgup, post_shift;
4296 HOST_WIDE_INT d = INTVAL (op1);
4297 rtx ml;
4299 if (d > 0)
4301 /* We could just as easily deal with negative constants here,
4302 but it does not seem worth the trouble for GCC 2.6. */
4303 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4305 pre_shift = floor_log2 (d);
4306 if (rem_flag)
4308 remainder = expand_binop (compute_mode, and_optab, op0,
4309 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4310 remainder, 0, OPTAB_LIB_WIDEN);
4311 if (remainder)
4312 return gen_lowpart (mode, remainder);
4314 quotient = expand_shift
4315 (RSHIFT_EXPR, compute_mode, op0,
4316 build_int_cst (NULL_TREE, pre_shift),
4317 tquotient, 0);
4319 else
4321 rtx t1, t2, t3, t4;
4323 mh = choose_multiplier (d, size, size - 1,
4324 &ml, &post_shift, &lgup);
4325 gcc_assert (!mh);
4327 if (post_shift < BITS_PER_WORD
4328 && size - 1 < BITS_PER_WORD)
4330 t1 = expand_shift
4331 (RSHIFT_EXPR, compute_mode, op0,
4332 build_int_cst (NULL_TREE, size - 1),
4333 NULL_RTX, 0);
4334 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4335 NULL_RTX, 0, OPTAB_WIDEN);
4336 extra_cost = (shift_cost[compute_mode][post_shift]
4337 + shift_cost[compute_mode][size - 1]
4338 + 2 * add_cost[compute_mode]);
4339 t3 = expand_mult_highpart (compute_mode, t2, ml,
4340 NULL_RTX, 1,
4341 max_cost - extra_cost);
4342 if (t3 != 0)
4344 t4 = expand_shift
4345 (RSHIFT_EXPR, compute_mode, t3,
4346 build_int_cst (NULL_TREE, post_shift),
4347 NULL_RTX, 1);
4348 quotient = expand_binop (compute_mode, xor_optab,
4349 t4, t1, tquotient, 0,
4350 OPTAB_WIDEN);
4355 else
4357 rtx nsign, t1, t2, t3, t4;
4358 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4359 op0, constm1_rtx), NULL_RTX);
4360 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4361 0, OPTAB_WIDEN);
4362 nsign = expand_shift
4363 (RSHIFT_EXPR, compute_mode, t2,
4364 build_int_cst (NULL_TREE, size - 1),
4365 NULL_RTX, 0);
4366 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4367 NULL_RTX);
4368 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4369 NULL_RTX, 0);
4370 if (t4)
4372 rtx t5;
4373 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4374 NULL_RTX, 0);
4375 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4376 t4, t5),
4377 tquotient);
4382 if (quotient != 0)
4383 break;
4384 delete_insns_since (last);
4386 /* Try using an instruction that produces both the quotient and
4387 remainder, using truncation. We can easily compensate the quotient
4388 or remainder to get floor rounding, once we have the remainder.
4389 Notice that we also compute the final remainder value here,
4390 and return the result right away. */
4391 if (target == 0 || GET_MODE (target) != compute_mode)
4392 target = gen_reg_rtx (compute_mode);
4394 if (rem_flag)
4396 remainder
4397 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4398 quotient = gen_reg_rtx (compute_mode);
4400 else
4402 quotient
4403 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4404 remainder = gen_reg_rtx (compute_mode);
4407 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4408 quotient, remainder, 0))
4410 /* This could be computed with a branch-less sequence.
4411 Save that for later. */
4412 rtx tem;
4413 rtx label = gen_label_rtx ();
4414 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4415 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4416 NULL_RTX, 0, OPTAB_WIDEN);
4417 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4418 expand_dec (quotient, const1_rtx);
4419 expand_inc (remainder, op1);
4420 emit_label (label);
4421 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4424 /* No luck with division elimination or divmod. Have to do it
4425 by conditionally adjusting op0 *and* the result. */
4427 rtx label1, label2, label3, label4, label5;
4428 rtx adjusted_op0;
4429 rtx tem;
4431 quotient = gen_reg_rtx (compute_mode);
4432 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4433 label1 = gen_label_rtx ();
4434 label2 = gen_label_rtx ();
4435 label3 = gen_label_rtx ();
4436 label4 = gen_label_rtx ();
4437 label5 = gen_label_rtx ();
4438 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4439 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4440 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4441 quotient, 0, OPTAB_LIB_WIDEN);
4442 if (tem != quotient)
4443 emit_move_insn (quotient, tem);
4444 emit_jump_insn (gen_jump (label5));
4445 emit_barrier ();
4446 emit_label (label1);
4447 expand_inc (adjusted_op0, const1_rtx);
4448 emit_jump_insn (gen_jump (label4));
4449 emit_barrier ();
4450 emit_label (label2);
4451 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4452 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4453 quotient, 0, OPTAB_LIB_WIDEN);
4454 if (tem != quotient)
4455 emit_move_insn (quotient, tem);
4456 emit_jump_insn (gen_jump (label5));
4457 emit_barrier ();
4458 emit_label (label3);
4459 expand_dec (adjusted_op0, const1_rtx);
4460 emit_label (label4);
4461 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4462 quotient, 0, OPTAB_LIB_WIDEN);
4463 if (tem != quotient)
4464 emit_move_insn (quotient, tem);
4465 expand_dec (quotient, const1_rtx);
4466 emit_label (label5);
4468 break;
4470 case CEIL_DIV_EXPR:
4471 case CEIL_MOD_EXPR:
4472 if (unsignedp)
4474 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4476 rtx t1, t2, t3;
4477 unsigned HOST_WIDE_INT d = INTVAL (op1);
4478 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4479 build_int_cst (NULL_TREE, floor_log2 (d)),
4480 tquotient, 1);
4481 t2 = expand_binop (compute_mode, and_optab, op0,
4482 GEN_INT (d - 1),
4483 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4484 t3 = gen_reg_rtx (compute_mode);
4485 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4486 compute_mode, 1, 1);
4487 if (t3 == 0)
4489 rtx lab;
4490 lab = gen_label_rtx ();
4491 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4492 expand_inc (t1, const1_rtx);
4493 emit_label (lab);
4494 quotient = t1;
4496 else
4497 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4498 t1, t3),
4499 tquotient);
4500 break;
4503 /* Try using an instruction that produces both the quotient and
4504 remainder, using truncation. We can easily compensate the
4505 quotient or remainder to get ceiling rounding, once we have the
4506 remainder. Notice that we also compute the final remainder
4507 value here, and return the result right away. */
4508 if (target == 0 || GET_MODE (target) != compute_mode)
4509 target = gen_reg_rtx (compute_mode);
4511 if (rem_flag)
4513 remainder = (REG_P (target)
4514 ? target : gen_reg_rtx (compute_mode));
4515 quotient = gen_reg_rtx (compute_mode);
4517 else
4519 quotient = (REG_P (target)
4520 ? target : gen_reg_rtx (compute_mode));
4521 remainder = gen_reg_rtx (compute_mode);
4524 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4525 remainder, 1))
4527 /* This could be computed with a branch-less sequence.
4528 Save that for later. */
4529 rtx label = gen_label_rtx ();
4530 do_cmp_and_jump (remainder, const0_rtx, EQ,
4531 compute_mode, label);
4532 expand_inc (quotient, const1_rtx);
4533 expand_dec (remainder, op1);
4534 emit_label (label);
4535 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4538 /* No luck with division elimination or divmod. Have to do it
4539 by conditionally adjusting op0 *and* the result. */
4541 rtx label1, label2;
4542 rtx adjusted_op0, tem;
4544 quotient = gen_reg_rtx (compute_mode);
4545 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4546 label1 = gen_label_rtx ();
4547 label2 = gen_label_rtx ();
4548 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4549 compute_mode, label1);
4550 emit_move_insn (quotient, const0_rtx);
4551 emit_jump_insn (gen_jump (label2));
4552 emit_barrier ();
4553 emit_label (label1);
4554 expand_dec (adjusted_op0, const1_rtx);
4555 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4556 quotient, 1, OPTAB_LIB_WIDEN);
4557 if (tem != quotient)
4558 emit_move_insn (quotient, tem);
4559 expand_inc (quotient, const1_rtx);
4560 emit_label (label2);
4563 else /* signed */
4565 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4566 && INTVAL (op1) >= 0)
4568 /* This is extremely similar to the code for the unsigned case
4569 above. For 2.7 we should merge these variants, but for
4570 2.6.1 I don't want to touch the code for unsigned since that
4571 gets used in C. The signed case will only be used by other
4572 languages (Ada). */
4574 rtx t1, t2, t3;
4575 unsigned HOST_WIDE_INT d = INTVAL (op1);
4576 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4577 build_int_cst (NULL_TREE, floor_log2 (d)),
4578 tquotient, 0);
4579 t2 = expand_binop (compute_mode, and_optab, op0,
4580 GEN_INT (d - 1),
4581 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4582 t3 = gen_reg_rtx (compute_mode);
4583 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4584 compute_mode, 1, 1);
4585 if (t3 == 0)
4587 rtx lab;
4588 lab = gen_label_rtx ();
4589 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4590 expand_inc (t1, const1_rtx);
4591 emit_label (lab);
4592 quotient = t1;
4594 else
4595 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4596 t1, t3),
4597 tquotient);
4598 break;
4601 /* Try using an instruction that produces both the quotient and
4602 remainder, using truncation. We can easily compensate the
4603 quotient or remainder to get ceiling rounding, once we have the
4604 remainder. Notice that we also compute the final remainder
4605 value here, and return the result right away. */
4606 if (target == 0 || GET_MODE (target) != compute_mode)
4607 target = gen_reg_rtx (compute_mode);
4608 if (rem_flag)
4610 remainder = (REG_P (target)
4611 ? target : gen_reg_rtx (compute_mode));
4612 quotient = gen_reg_rtx (compute_mode);
4614 else
4616 quotient = (REG_P (target)
4617 ? target : gen_reg_rtx (compute_mode));
4618 remainder = gen_reg_rtx (compute_mode);
4621 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4622 remainder, 0))
4624 /* This could be computed with a branch-less sequence.
4625 Save that for later. */
4626 rtx tem;
4627 rtx label = gen_label_rtx ();
4628 do_cmp_and_jump (remainder, const0_rtx, EQ,
4629 compute_mode, label);
4630 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4631 NULL_RTX, 0, OPTAB_WIDEN);
4632 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4633 expand_inc (quotient, const1_rtx);
4634 expand_dec (remainder, op1);
4635 emit_label (label);
4636 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4639 /* No luck with division elimination or divmod. Have to do it
4640 by conditionally adjusting op0 *and* the result. */
4642 rtx label1, label2, label3, label4, label5;
4643 rtx adjusted_op0;
4644 rtx tem;
4646 quotient = gen_reg_rtx (compute_mode);
4647 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4648 label1 = gen_label_rtx ();
4649 label2 = gen_label_rtx ();
4650 label3 = gen_label_rtx ();
4651 label4 = gen_label_rtx ();
4652 label5 = gen_label_rtx ();
4653 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4654 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4655 compute_mode, label1);
4656 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4657 quotient, 0, OPTAB_LIB_WIDEN);
4658 if (tem != quotient)
4659 emit_move_insn (quotient, tem);
4660 emit_jump_insn (gen_jump (label5));
4661 emit_barrier ();
4662 emit_label (label1);
4663 expand_dec (adjusted_op0, const1_rtx);
4664 emit_jump_insn (gen_jump (label4));
4665 emit_barrier ();
4666 emit_label (label2);
4667 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4668 compute_mode, label3);
4669 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4670 quotient, 0, OPTAB_LIB_WIDEN);
4671 if (tem != quotient)
4672 emit_move_insn (quotient, tem);
4673 emit_jump_insn (gen_jump (label5));
4674 emit_barrier ();
4675 emit_label (label3);
4676 expand_inc (adjusted_op0, const1_rtx);
4677 emit_label (label4);
4678 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4679 quotient, 0, OPTAB_LIB_WIDEN);
4680 if (tem != quotient)
4681 emit_move_insn (quotient, tem);
4682 expand_inc (quotient, const1_rtx);
4683 emit_label (label5);
4686 break;
4688 case EXACT_DIV_EXPR:
4689 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4691 HOST_WIDE_INT d = INTVAL (op1);
4692 unsigned HOST_WIDE_INT ml;
4693 int pre_shift;
4694 rtx t1;
4696 pre_shift = floor_log2 (d & -d);
4697 ml = invert_mod2n (d >> pre_shift, size);
4698 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4699 build_int_cst (NULL_TREE, pre_shift),
4700 NULL_RTX, unsignedp);
4701 quotient = expand_mult (compute_mode, t1,
4702 gen_int_mode (ml, compute_mode),
4703 NULL_RTX, 1);
4705 insn = get_last_insn ();
4706 set_unique_reg_note (insn,
4707 REG_EQUAL,
4708 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4709 compute_mode,
4710 op0, op1));
4712 break;
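#if 0
      /* Illustration only, not part of the compiler: the invert_mod2n idea
	 above in plain C, for an exact unsigned 32-bit division by 7.  The
	 constant 0xB6DB6DB7 (not taken from this file) is the inverse of 7
	 modulo 2**32, since 7 * 0xB6DB6DB7 == 5 * 2**32 + 1, so a single
	 low multiply recovers the quotient whenever x7 is a multiple of 7.
	 Even divisors first shift out their trailing zero bits, as the
	 pre_shift code above does.  */
      unsigned int x7 = 7 * 123;
      unsigned int q7 = x7 * 0xB6DB6DB7u;	/* q7 == 123 */
#endif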
4714 case ROUND_DIV_EXPR:
4715 case ROUND_MOD_EXPR:
4716 if (unsignedp)
4718 rtx tem;
4719 rtx label;
4720 label = gen_label_rtx ();
4721 quotient = gen_reg_rtx (compute_mode);
4722 remainder = gen_reg_rtx (compute_mode);
4723 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4725 rtx tem;
4726 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4727 quotient, 1, OPTAB_LIB_WIDEN);
4728 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4729 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4730 remainder, 1, OPTAB_LIB_WIDEN);
4732 tem = plus_constant (op1, -1);
4733 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4734 build_int_cst (NULL_TREE, 1),
4735 NULL_RTX, 1);
4736 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4737 expand_inc (quotient, const1_rtx);
4738 expand_dec (remainder, op1);
4739 emit_label (label);
4741 else
4743 rtx abs_rem, abs_op1, tem, mask;
4744 rtx label;
4745 label = gen_label_rtx ();
4746 quotient = gen_reg_rtx (compute_mode);
4747 remainder = gen_reg_rtx (compute_mode);
4748 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4750 rtx tem;
4751 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4752 quotient, 0, OPTAB_LIB_WIDEN);
4753 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4754 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4755 remainder, 0, OPTAB_LIB_WIDEN);
4757 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4758 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4759 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4760 build_int_cst (NULL_TREE, 1),
4761 NULL_RTX, 1);
4762 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4763 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4764 NULL_RTX, 0, OPTAB_WIDEN);
4765 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4766 build_int_cst (NULL_TREE, size - 1),
4767 NULL_RTX, 0);
4768 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4769 NULL_RTX, 0, OPTAB_WIDEN);
4770 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4771 NULL_RTX, 0, OPTAB_WIDEN);
4772 expand_inc (quotient, tem);
4773 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4774 NULL_RTX, 0, OPTAB_WIDEN);
4775 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4776 NULL_RTX, 0, OPTAB_WIDEN);
4777 expand_dec (remainder, tem);
4778 emit_label (label);
4780 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4782 default:
4783 gcc_unreachable ();
4786 if (quotient == 0)
4788 if (target && GET_MODE (target) != compute_mode)
4789 target = 0;
4791 if (rem_flag)
4793 /* Try to produce the remainder without producing the quotient.
4794 If we seem to have a divmod pattern that does not require widening,
4795 don't try widening here. We should really have a WIDEN argument
4796 to expand_twoval_binop, since what we'd really like to do here is
4797 1) try a mod insn in compute_mode
4798 2) try a divmod insn in compute_mode
4799 3) try a div insn in compute_mode and multiply-subtract to get
4800 remainder
4801 4) try the same things with widening allowed. */
4802 remainder
4803 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4804 op0, op1, target,
4805 unsignedp,
4806 ((optab2->handlers[compute_mode].insn_code
4807 != CODE_FOR_nothing)
4808 ? OPTAB_DIRECT : OPTAB_WIDEN));
4809 if (remainder == 0)
4811 /* No luck there. Can we do remainder and divide at once
4812 without a library call? */
4813 remainder = gen_reg_rtx (compute_mode);
4814 if (! expand_twoval_binop ((unsignedp
4815 ? udivmod_optab
4816 : sdivmod_optab),
4817 op0, op1,
4818 NULL_RTX, remainder, unsignedp))
4819 remainder = 0;
4822 if (remainder)
4823 return gen_lowpart (mode, remainder);
4826 /* Produce the quotient. Try a quotient insn, but not a library call.
4827 If we have a divmod in this mode, use it in preference to widening
4828 the div (for this test we assume it will not fail). Note that optab2
4829 is set to the one of the two optabs that the call below will use. */
4830 quotient
4831 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4832 op0, op1, rem_flag ? NULL_RTX : target,
4833 unsignedp,
4834 ((optab2->handlers[compute_mode].insn_code
4835 != CODE_FOR_nothing)
4836 ? OPTAB_DIRECT : OPTAB_WIDEN));
4838 if (quotient == 0)
4840 /* No luck there. Try a quotient-and-remainder insn,
4841 keeping the quotient alone. */
4842 quotient = gen_reg_rtx (compute_mode);
4843 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4844 op0, op1,
4845 quotient, NULL_RTX, unsignedp))
4847 quotient = 0;
4848 if (! rem_flag)
4849 /* Still no luck. If we are not computing the remainder,
4850 use a library call for the quotient. */
4851 quotient = sign_expand_binop (compute_mode,
4852 udiv_optab, sdiv_optab,
4853 op0, op1, target,
4854 unsignedp, OPTAB_LIB_WIDEN);
4859 if (rem_flag)
4861 if (target && GET_MODE (target) != compute_mode)
4862 target = 0;
4864 if (quotient == 0)
4866 /* No divide instruction either. Use library for remainder. */
4867 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4868 op0, op1, target,
4869 unsignedp, OPTAB_LIB_WIDEN);
4870 /* No remainder function. Try a quotient-and-remainder
4871 function, keeping the remainder. */
4872 if (!remainder)
4874 remainder = gen_reg_rtx (compute_mode);
4875 if (!expand_twoval_binop_libfunc
4876 (unsignedp ? udivmod_optab : sdivmod_optab,
4877 op0, op1,
4878 NULL_RTX, remainder,
4879 unsignedp ? UMOD : MOD))
4880 remainder = NULL_RTX;
4883 else
4885 /* We divided. Now finish doing X - Y * (X / Y). */
4886 remainder = expand_mult (compute_mode, quotient, op1,
4887 NULL_RTX, unsignedp);
4888 remainder = expand_binop (compute_mode, sub_optab, op0,
4889 remainder, target, unsignedp,
4890 OPTAB_LIB_WIDEN);
4894 return gen_lowpart (mode, rem_flag ? remainder : quotient);
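/* Illustration only, not part of the compiler: the rounding adjustments
   that the FLOOR_ and CEIL_ cases above apply to a truncating division,
   written in plain C for int operands (b != 0 and a / b not overflowing);
   the function name is made up.  */
#if 0
static void
example_floor_divmod (int a, int b, int *q, int *r)
{
  *q = a / b;				/* truncating quotient  */
  *r = a % b;				/* truncating remainder */
  /* Round toward minus infinity: adjust when a remainder is left and the
     operands have opposite signs (the (a ^ b) < 0 test used above).
     Ceiling rounding is the mirror image: adjust by +1 and -b when the
     signs agree.  */
  if (*r != 0 && (a ^ b) < 0)
    {
      *q -= 1;
      *r += b;
    }
}
#endif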
4897 /* Return a tree node with data type TYPE, describing the value of X.
4898 Usually this is a VAR_DECL, if there is no obvious better choice.
4899 X may be an expression; however, we only support those expressions
4900 generated by loop.c. */
4902 tree
4903 make_tree (tree type, rtx x)
4905 tree t;
4907 switch (GET_CODE (x))
4909 case CONST_INT:
4911 HOST_WIDE_INT hi = 0;
4913 if (INTVAL (x) < 0
4914 && !(TYPE_UNSIGNED (type)
4915 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4916 < HOST_BITS_PER_WIDE_INT)))
4917 hi = -1;
4919 t = build_int_cst_wide (type, INTVAL (x), hi);
4921 return t;
4924 case CONST_DOUBLE:
4925 if (GET_MODE (x) == VOIDmode)
4926 t = build_int_cst_wide (type,
4927 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4928 else
4930 REAL_VALUE_TYPE d;
4932 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4933 t = build_real (type, d);
4936 return t;
4938 case CONST_VECTOR:
4940 int i, units;
4941 rtx elt;
4942 tree t = NULL_TREE;
4944 units = CONST_VECTOR_NUNITS (x);
4946 /* Build a tree with vector elements. */
4947 for (i = units - 1; i >= 0; --i)
4949 elt = CONST_VECTOR_ELT (x, i);
4950 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4953 return build_vector (type, t);
4956 case PLUS:
4957 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4958 make_tree (type, XEXP (x, 1)));
4960 case MINUS:
4961 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4962 make_tree (type, XEXP (x, 1)));
4964 case NEG:
4965 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
4967 case MULT:
4968 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4969 make_tree (type, XEXP (x, 1)));
4971 case ASHIFT:
4972 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4973 make_tree (type, XEXP (x, 1)));
4975 case LSHIFTRT:
4976 t = lang_hooks.types.unsigned_type (type);
4977 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4978 make_tree (t, XEXP (x, 0)),
4979 make_tree (type, XEXP (x, 1))));
4981 case ASHIFTRT:
4982 t = lang_hooks.types.signed_type (type);
4983 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4984 make_tree (t, XEXP (x, 0)),
4985 make_tree (type, XEXP (x, 1))));
4987 case DIV:
4988 if (TREE_CODE (type) != REAL_TYPE)
4989 t = lang_hooks.types.signed_type (type);
4990 else
4991 t = type;
4993 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4994 make_tree (t, XEXP (x, 0)),
4995 make_tree (t, XEXP (x, 1))));
4996 case UDIV:
4997 t = lang_hooks.types.unsigned_type (type);
4998 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4999 make_tree (t, XEXP (x, 0)),
5000 make_tree (t, XEXP (x, 1))));
5002 case SIGN_EXTEND:
5003 case ZERO_EXTEND:
5004 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
5005 GET_CODE (x) == ZERO_EXTEND);
5006 return fold_convert (type, make_tree (t, XEXP (x, 0)));
5008 default:
5009 t = build_decl (VAR_DECL, NULL_TREE, type);
5011 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
5012 ptr_mode. So convert. */
5013 if (POINTER_TYPE_P (type))
5014 x = convert_memory_address (TYPE_MODE (type), x);
5016 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5017 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5018 t->decl_with_rtl.rtl = x;
5020 return t;
5024 /* Check whether the multiplication X * MULT + ADD overflows.
5025 X, MULT and ADD must be CONST_*.
5026 MODE is the machine mode for the computation.
5027 X and MULT must have mode MODE. ADD may have a different mode.
5028 So can X (defaults to same as MODE).
5029 UNSIGNEDP is nonzero to do unsigned multiplication. */
5031 bool
5032 const_mult_add_overflow_p (rtx x, rtx mult, rtx add,
5033 enum machine_mode mode, int unsignedp)
5035 tree type, mult_type, add_type, result;
5037 type = lang_hooks.types.type_for_mode (mode, unsignedp);
5039 /* In order to get a proper overflow indication from an unsigned
5040 type, we have to pretend that it's a sizetype. */
5041 mult_type = type;
5042 if (unsignedp)
5044 /* FIXME: It would be nice if we could step directly from this
5045 type to its sizetype equivalent. */
5046 mult_type = build_distinct_type_copy (type);
5047 TYPE_IS_SIZETYPE (mult_type) = 1;
5050 add_type = (GET_MODE (add) == VOIDmode ? mult_type
5051 : lang_hooks.types.type_for_mode (GET_MODE (add), unsignedp));
5053 result = fold_build2 (PLUS_EXPR, mult_type,
5054 fold_build2 (MULT_EXPR, mult_type,
5055 make_tree (mult_type, x),
5056 make_tree (mult_type, mult)),
5057 make_tree (add_type, add));
5059 return TREE_CONSTANT_OVERFLOW (result);
5062 /* Return an rtx representing the value of X * MULT + ADD.
5063 TARGET is a suggestion for where to store the result (an rtx).
5064 MODE is the machine mode for the computation.
5065 X and MULT must have mode MODE. ADD may have a different mode.
5066 So can X (defaults to same as MODE).
5067 UNSIGNEDP is nonzero to do unsigned multiplication.
5068 This may emit insns. */
rtx
5071 expand_mult_add (rtx x, rtx target, rtx mult, rtx add, enum machine_mode mode,
5072 int unsignedp)
5074 tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
5075 tree add_type = (GET_MODE (add) == VOIDmode
5076 ? type: lang_hooks.types.type_for_mode (GET_MODE (add),
5077 unsignedp));
5078 tree result = fold_build2 (PLUS_EXPR, type,
5079 fold_build2 (MULT_EXPR, type,
5080 make_tree (type, x),
5081 make_tree (type, mult)),
5082 make_tree (add_type, add));
5084 return expand_expr (result, target, VOIDmode, 0);
5087 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5088 and returning TARGET.
5090 If TARGET is 0, a pseudo-register or constant is returned. */
rtx
5093 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
5095 rtx tem = 0;
5097 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5098 tem = simplify_binary_operation (AND, mode, op0, op1);
5099 if (tem == 0)
5100 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5102 if (target == 0)
5103 target = tem;
5104 else if (tem != target)
5105 emit_move_insn (target, tem);
5106 return target;
5109 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5110 and storing in TARGET. Normally return TARGET.
5111 Return 0 if that cannot be done.
5113 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5114 it is VOIDmode, they cannot both be CONST_INT.
5116 UNSIGNEDP is for the case where we have to widen the operands
5117 to perform the operation. It says to use zero-extension.
5119 NORMALIZEP is 1 if we should convert the result to be either zero
5120 or one. NORMALIZEP is -1 if we should convert the result to be
5121 either zero or -1. If NORMALIZEP is zero, the result will be left
5122 "raw" out of the scc insn. */
rtx
5125 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5126 enum machine_mode mode, int unsignedp, int normalizep)
5128 rtx subtarget;
5129 enum insn_code icode;
5130 enum machine_mode compare_mode;
5131 enum machine_mode target_mode = GET_MODE (target);
5132 rtx tem;
5133 rtx last = get_last_insn ();
5134 rtx pattern, comparison;
5136 if (unsignedp)
5137 code = unsigned_condition (code);
5139 /* If one operand is constant, make it the second one. Only do this
5140 if the other operand is not constant as well. */
5142 if (swap_commutative_operands_p (op0, op1))
5144 tem = op0;
5145 op0 = op1;
5146 op1 = tem;
5147 code = swap_condition (code);
5150 if (mode == VOIDmode)
5151 mode = GET_MODE (op0);
5153 /* For some comparisons with 1 and -1, we can convert this to
5154 comparisons with zero. This will often produce more opportunities for
5155 store-flag insns. */
5157 switch (code)
5159 case LT:
5160 if (op1 == const1_rtx)
5161 op1 = const0_rtx, code = LE;
5162 break;
5163 case LE:
5164 if (op1 == constm1_rtx)
5165 op1 = const0_rtx, code = LT;
5166 break;
5167 case GE:
5168 if (op1 == const1_rtx)
5169 op1 = const0_rtx, code = GT;
5170 break;
5171 case GT:
5172 if (op1 == constm1_rtx)
5173 op1 = const0_rtx, code = GE;
5174 break;
5175 case GEU:
5176 if (op1 == const1_rtx)
5177 op1 = const0_rtx, code = NE;
5178 break;
5179 case LTU:
5180 if (op1 == const1_rtx)
5181 op1 = const0_rtx, code = EQ;
5182 break;
5183 default:
5184 break;
5187 /* If we are comparing a double-word integer with zero or -1, we can
5188 convert the comparison into one involving a single word. */
5189 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5190 && GET_MODE_CLASS (mode) == MODE_INT
5191 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5193 if ((code == EQ || code == NE)
5194 && (op1 == const0_rtx || op1 == constm1_rtx))
5196 rtx op00, op01, op0both;
5198 /* Do a logical OR or AND of the two words and compare the result. */
5199 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5200 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5201 op0both = expand_binop (word_mode,
5202 op1 == const0_rtx ? ior_optab : and_optab,
5203 op00, op01, NULL_RTX, unsignedp, OPTAB_DIRECT);
5205 if (op0both != 0)
5206 return emit_store_flag (target, code, op0both, op1, word_mode,
5207 unsignedp, normalizep);
5209 else if ((code == LT || code == GE) && op1 == const0_rtx)
5211 rtx op0h;
5213 /* If testing the sign bit, can just test on high word. */
5214 op0h = simplify_gen_subreg (word_mode, op0, mode,
5215 subreg_highpart_offset (word_mode, mode));
5216 return emit_store_flag (target, code, op0h, op1, word_mode,
5217 unsignedp, normalizep);
5221 /* From now on, we won't change CODE, so set ICODE now. */
5222 icode = setcc_gen_code[(int) code];
5224 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5225 complement of A (for GE) and shifting the sign bit to the low bit. */
5226 if (op1 == const0_rtx && (code == LT || code == GE)
5227 && GET_MODE_CLASS (mode) == MODE_INT
5228 && (normalizep || STORE_FLAG_VALUE == 1
5229 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5230 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5231 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
5233 subtarget = target;
5235 /* If the result is to be wider than OP0, it is best to convert it
5236 first. If it is to be narrower, it is *incorrect* to convert it
5237 first. */
5238 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5240 op0 = convert_modes (target_mode, mode, op0, 0);
5241 mode = target_mode;
5244 if (target_mode != mode)
5245 subtarget = 0;
5247 if (code == GE)
5248 op0 = expand_unop (mode, one_cmpl_optab, op0,
5249 ((STORE_FLAG_VALUE == 1 || normalizep)
5250 ? 0 : subtarget), 0);
5252 if (STORE_FLAG_VALUE == 1 || normalizep)
5253 /* If we are supposed to produce a 0/1 value, we want to do
5254 a logical shift from the sign bit to the low-order bit; for
5255 a -1/0 value, we do an arithmetic shift. */
5256 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5257 size_int (GET_MODE_BITSIZE (mode) - 1),
5258 subtarget, normalizep != -1);
5260 if (mode != target_mode)
5261 op0 = convert_modes (target_mode, mode, op0, 0);
5263 return op0;
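#if 0
      /* Illustration only, not part of the compiler: with a 32-bit operand
	 and an arithmetic right shift, the branch above reduces to the
	 familiar sign-bit idioms (x is a made-up stand-in for the value of
	 OP0):  */
      int lt_01 = (int) ((unsigned int) x >> 31);	/* x <  0, 0/1 form  */
      int ge_01 = (int) ((unsigned int) ~x >> 31);	/* x >= 0, 0/1 form  */
      int lt_m1 = x >> 31;				/* x <  0, 0/-1 form */
      int ge_m1 = ~x >> 31;				/* x >= 0, 0/-1 form */
#endif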
5266 if (icode != CODE_FOR_nothing)
5268 insn_operand_predicate_fn pred;
5270 /* We think we may be able to do this with an scc insn. Emit the
5271 comparison and then the scc insn. */
5273 do_pending_stack_adjust ();
5274 last = get_last_insn ();
5276 comparison
5277 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
5278 if (CONSTANT_P (comparison))
5280 switch (GET_CODE (comparison))
5282 case CONST_INT:
5283 if (comparison == const0_rtx)
5284 return const0_rtx;
5285 break;
5287 #ifdef FLOAT_STORE_FLAG_VALUE
5288 case CONST_DOUBLE:
5289 if (comparison == CONST0_RTX (GET_MODE (comparison)))
5290 return const0_rtx;
5291 break;
5292 #endif
5293 default:
5294 gcc_unreachable ();
5297 if (normalizep == 1)
5298 return const1_rtx;
5299 if (normalizep == -1)
5300 return constm1_rtx;
5301 return const_true_rtx;
5304 /* The code of COMPARISON may not match CODE if compare_from_rtx
5305 decided to swap its operands and reverse the original code.
5307 We know that compare_from_rtx returns either a CONST_INT or
5308 a new comparison code, so it is safe to just extract the
5309 code from COMPARISON. */
5310 code = GET_CODE (comparison);
5312 /* Get a reference to the target in the proper mode for this insn. */
5313 compare_mode = insn_data[(int) icode].operand[0].mode;
5314 subtarget = target;
5315 pred = insn_data[(int) icode].operand[0].predicate;
5316 if (optimize || ! (*pred) (subtarget, compare_mode))
5317 subtarget = gen_reg_rtx (compare_mode);
5319 pattern = GEN_FCN (icode) (subtarget);
5320 if (pattern)
5322 emit_insn (pattern);
5324 /* If we are converting to a wider mode, first convert to
5325 TARGET_MODE, then normalize. This produces better combining
5326 opportunities on machines that have a SIGN_EXTRACT when we are
5327 testing a single bit. This mostly benefits the 68k.
5329 If STORE_FLAG_VALUE does not have the sign bit set when
5330 interpreted in COMPARE_MODE, we can do this conversion as
5331 unsigned, which is usually more efficient. */
5332 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
5334 convert_move (target, subtarget,
5335 (GET_MODE_BITSIZE (compare_mode)
5336 <= HOST_BITS_PER_WIDE_INT)
5337 && 0 == (STORE_FLAG_VALUE
5338 & ((HOST_WIDE_INT) 1
5339 << (GET_MODE_BITSIZE (compare_mode) -1))));
5340 op0 = target;
5341 compare_mode = target_mode;
5343 else
5344 op0 = subtarget;
5346 /* If we want to keep subexpressions around, don't reuse our
5347 last target. */
5349 if (optimize)
5350 subtarget = 0;
5352 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
5353 we don't have to do anything. */
5354 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5356 /* STORE_FLAG_VALUE might be the most negative number, so write
5357 the comparison this way to avoid a compile-time warning.  */
5358 else if (- normalizep == STORE_FLAG_VALUE)
5359 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
5361 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
5362 makes it hard to use a value of just the sign bit due to
5363 ANSI integer constant typing rules. */
5364 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
5365 && (STORE_FLAG_VALUE
5366 & ((HOST_WIDE_INT) 1
5367 << (GET_MODE_BITSIZE (compare_mode) - 1))))
5368 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
5369 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
5370 subtarget, normalizep == 1);
5371 else
5373 gcc_assert (STORE_FLAG_VALUE & 1);
5375 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
5376 if (normalizep == -1)
5377 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
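/* Illustrative sketch, not part of the original file: the normalizations
   above for a 32-bit int, assuming the raw flag value either occupies the
   sign bit or has bit 0 set, and assuming arithmetic right shifts of
   signed values.  The helper name is made up for illustration only.  */

static int
sketch_normalize_flag (int raw, int flag_is_sign_bit, int want_minus_one)
{
  if (flag_is_sign_bit)
    /* Arithmetic shift gives 0/-1, logical shift gives 0/1.  */
    return want_minus_one ? raw >> 31 : (int) ((unsigned int) raw >> 31);

  /* Otherwise bit 0 carries the answer; negate it for a 0/-1 result.  */
  return want_minus_one ? -(raw & 1) : raw & 1;
}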
5380 /* If we were converting to a smaller mode, do the
5381 conversion now. */
5382 if (target_mode != compare_mode)
5384 convert_move (target, op0, 0);
5385 return target;
5387 else
5388 return op0;
5392 delete_insns_since (last);
5394 /* If optimizing, use different pseudo registers for each insn, instead
5395 of reusing the same pseudo. This leads to better CSE, but slows
5396 down the compiler, since there are more pseudos.  */
5397 subtarget = (!optimize
5398 && (target_mode == mode)) ? target : NULL_RTX;
5400 /* If we reached here, we can't do this with a scc insn. However, there
5401 are some comparisons that can be done directly. For example, if
5402 this is an equality comparison of integers, we can try to exclusive-or
5403 (or subtract) the two operands and use a recursive call to try the
5404 comparison with zero. Don't do any of these cases if branches are
5405 very cheap. */
5407 if (BRANCH_COST > 0
5408 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
5409 && op1 != const0_rtx)
5411 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5412 OPTAB_WIDEN);
5414 if (tem == 0)
5415 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5416 OPTAB_WIDEN);
5417 if (tem != 0)
5418 tem = emit_store_flag (target, code, tem, const0_rtx,
5419 mode, unsignedp, normalizep);
5420 if (tem == 0)
5421 delete_insns_since (last);
5422 return tem;
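/* Illustrative sketch, not part of the original file: the reduction used
   above, rewriting an equality test against a nonzero operand as a test
   against zero.  The helper name is made up for illustration only.  */

static int
sketch_eq_via_xor (unsigned int a, unsigned int b)
{
  /* A == B exactly when A ^ B == 0; A - B == 0 works the same way.  */
  return (a ^ b) == 0;
}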
5425 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5426 the constant zero. Reject all other comparisons at this point. Only
5427 do LE and GT if branches are expensive, since LE and GT themselves are
5428 expensive on 2-operand machines.  */
5430 if (BRANCH_COST == 0
5431 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
5432 || (code != EQ && code != NE
5433 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
5434 return 0;
5436 /* See what we need to return. We can only return a 1, -1, or the
5437 sign bit. */
5439 if (normalizep == 0)
5441 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5442 normalizep = STORE_FLAG_VALUE;
5444 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5445 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5446 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5448 else
5449 return 0;
5452 /* Try to put the result of the comparison in the sign bit. Assume we can't
5453 do the necessary operation below. */
5455 tem = 0;
5457 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5458 the sign bit set. */
5460 if (code == LE)
5462 /* This is destructive, so SUBTARGET can't be OP0. */
5463 if (rtx_equal_p (subtarget, op0))
5464 subtarget = 0;
5466 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5467 OPTAB_WIDEN);
5468 if (tem)
5469 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5470 OPTAB_WIDEN);
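/* Illustrative sketch, not part of the original file: the A <= 0 identity
   used above, for a 32-bit two's-complement int.  As in the generated RTL,
   A - 1 relies on wrap-around for A == INT_MIN.  The helper name is made
   up for illustration only.  */

static unsigned int
sketch_le_zero_sign_bit (int a)
{
  /* A | (A - 1) has its sign bit set exactly when A <= 0:
     A == 0 gives 0 | -1, A < 0 keeps its sign bit, A > 0 clears it.  */
  return (unsigned int) (a | (a - 1)) >> 31;
}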
5473 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5474 number of bits in the mode of OP0, minus one.  */
5476 if (code == GT)
5478 if (rtx_equal_p (subtarget, op0))
5479 subtarget = 0;
5481 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5482 size_int (GET_MODE_BITSIZE (mode) - 1),
5483 subtarget, 0);
5484 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5485 OPTAB_WIDEN);
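/* Illustrative sketch, not part of the original file: the A > 0 identity
   used above, for a 32-bit two's-complement int with arithmetic right
   shifts of signed values.  The helper name is made up for illustration
   only.  */

static unsigned int
sketch_gt_zero_sign_bit (int a)
{
  /* (A >> 31) - A is negative exactly when A > 0:
     A > 0 gives 0 - A < 0, A == 0 gives 0, and A < 0 gives -1 - A >= 0.  */
  return (unsigned int) ((a >> 31) - a) >> 31;
}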
5488 if (code == EQ || code == NE)
5490 /* For EQ or NE, one way to do the comparison is to apply an operation
5491 that converts the operand into a positive number if it is nonzero
5492 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5493 for NE we negate. This puts the result in the sign bit. Then we
5494 normalize with a shift, if needed.
5496 Two operations that can do the above actions are ABS and FFS, so try
5497 them. If that doesn't work, and MODE is smaller than a full word,
5498 we can use zero-extension to the wider mode (an unsigned conversion)
5499 as the operation. */
5501 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5502 that is compensated by the subsequent overflow when subtracting
5503 one / negating. */
5505 if (abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
5506 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5507 else if (ffs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
5508 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5509 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5511 tem = convert_modes (word_mode, mode, op0, 1);
5512 mode = word_mode;
5515 if (tem != 0)
5517 if (code == EQ)
5518 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5519 0, OPTAB_WIDEN);
5520 else
5521 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
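/* Illustrative sketch, not part of the original file: how ABS plus a
   subtract (for EQ) or a negation (for NE) leaves the answer in the sign
   bit, for a 32-bit int.  ABS of INT_MIN overflows exactly as the comment
   above describes.  The helper name is made up for illustration only.  */

static unsigned int
sketch_eq_zero_sign_bit (int a)
{
  int positive = a < 0 ? -a : a;	/* stands in for the ABS insn */

  /* POSITIVE - 1 is negative only when A was zero; negating POSITIVE
     instead would make the sign bit record A != 0.  */
  return (unsigned int) (positive - 1) >> 31;
}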
5524 /* If we couldn't do it that way, for NE we can "or" the two's complement
5525 of the value with itself. For EQ, we take the one's complement of
5526 that "or", which is an extra insn, so we only handle EQ if branches
5527 are expensive. */
5529 if (tem == 0 && (code == NE || BRANCH_COST > 1))
5531 if (rtx_equal_p (subtarget, op0))
5532 subtarget = 0;
5534 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5535 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5536 OPTAB_WIDEN);
5538 if (tem && code == EQ)
5539 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
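/* Illustrative sketch, not part of the original file: the fallback above,
   OR-ing a value with its own negation so the sign bit records A != 0, for
   a 32-bit int.  Complementing the result first gives the EQ form.  The
   helper name is made up for illustration only.  */

static unsigned int
sketch_ne_zero_sign_bit (int a)
{
  /* For any nonzero A at least one of A and -A is negative (both are,
     for INT_MIN), so -A | A has the sign bit set; for A == 0 it is 0.  */
  return (unsigned int) (-a | a) >> 31;
}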
5543 if (tem && normalizep)
5544 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5545 size_int (GET_MODE_BITSIZE (mode) - 1),
5546 subtarget, normalizep == 1);
5548 if (tem)
5550 if (GET_MODE (tem) != target_mode)
5552 convert_move (target, tem, 0);
5553 tem = target;
5555 else if (!subtarget)
5557 emit_move_insn (target, tem);
5558 tem = target;
5561 else
5562 delete_insns_since (last);
5564 return tem;
5567 /* Like emit_store_flag, but always succeeds. */
5569 rtx
5570 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5571 enum machine_mode mode, int unsignedp, int normalizep)
5573 rtx tem, label;
5575 /* First see if emit_store_flag can do the job. */
5576 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5577 if (tem != 0)
5578 return tem;
5580 if (normalizep == 0)
5581 normalizep = 1;
5583 /* If this failed, we have to do this with set/compare/jump/set code. */
5585 if (!REG_P (target)
5586 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5587 target = gen_reg_rtx (GET_MODE (target));
5589 emit_move_insn (target, const1_rtx);
5590 label = gen_label_rtx ();
5591 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5592 NULL_RTX, label);
5594 emit_move_insn (target, const0_rtx);
5595 emit_label (label);
5597 return target;
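/* Illustrative sketch, not part of the original file: the control flow the
   set/compare/jump/set fallback above produces, written as plain C for a
   0/1 result and an LT comparison.  The helper name is made up for
   illustration only.  */

static int
sketch_store_flag_force (int op0, int op1)
{
  int target = 1;		/* emit_move_insn (target, const1_rtx) */

  if (!(op0 < op1))		/* do_compare_rtx_and_jump skips the store */
    target = 0;			/* emit_move_insn (target, const0_rtx) */

  return target;		/* the label falls through to here */
}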
5600 /* Perform possibly multi-word comparison and conditional jump to LABEL
5601 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
5603 The algorithm is based on the code in expr.c:do_jump.
5605 Note that this does not perform a general comparison. Only
5606 variants generated within expmed.c are correctly handled; others
5607 could be handled if needed.  */
5609 static void
5610 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5611 rtx label)
5613 /* If this mode is an integer too wide to compare properly,
5614 compare word by word. Rely on cse to optimize constant cases. */
5616 if (GET_MODE_CLASS (mode) == MODE_INT
5617 && ! can_compare_p (op, mode, ccp_jump))
5619 rtx label2 = gen_label_rtx ();
5621 switch (op)
5623 case LTU:
5624 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
5625 break;
5627 case LEU:
5628 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
5629 break;
5631 case LT:
5632 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
5633 break;
5635 case GT:
5636 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
5637 break;
5639 case GE:
5640 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
5641 break;
5643 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
5644 those are the only equality operations we do.  */
5645 case EQ:
5646 gcc_assert (arg2 == const0_rtx && mode == GET_MODE (arg1));
5647 do_jump_by_parts_equality_rtx (arg1, label2, label);
5648 break;
5650 case NE:
5651 gcc_assert (arg2 == const0_rtx && mode == GET_MODE (arg1));
5652 do_jump_by_parts_equality_rtx (arg1, label, label2);
5653 break;
5655 default:
5656 gcc_unreachable ();
5659 emit_label (label2);
5661 else
5662 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);
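/* Illustrative sketch, not part of the original file: a word-by-word
   unsigned "less than" on a 64-bit value split into 32-bit words, the kind
   of comparison the LTU case above hands to do_jump_by_parts_greater_rtx.
   The helper name is made up for illustration only.  */

static int
sketch_wide_ltu (unsigned int a_hi, unsigned int a_lo,
		 unsigned int b_hi, unsigned int b_lo)
{
  /* Decide on the high words when they differ; otherwise the low words
     settle the comparison.  */
  if (a_hi != b_hi)
    return a_hi < b_hi;
  return a_lo < b_lo;
}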