/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "real.h"
#include "recog.h"
static void store_fixed_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, rtx,
                                           unsigned int));
static void store_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, rtx,
                                           unsigned int));
static rtx extract_fixed_bit_field PARAMS ((enum machine_mode, rtx,
                                            unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT,
                                            rtx, int, unsigned int));
static rtx mask_rtx PARAMS ((enum machine_mode, int,
                             int, int));
static rtx lshift_value PARAMS ((enum machine_mode, rtx,
                                 int, int));
static rtx extract_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT, int,
                                            unsigned int));
static void do_cmp_and_jump PARAMS ((rtx, rtx, enum rtx_code,
                                     enum machine_mode, rtx));
/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;
#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
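
/* With the default above, unaligned accesses are treated as slow exactly
   on strict-alignment targets; a target where misaligned loads and stores
   merely take extra cycles (rather than trapping) can override this macro
   in its header to get better code here.  */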
/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif
/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed ()
{
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                       gen_rtx_ASHIFT (word_mode, reg,
                                                       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_PLUS (word_mode,
                                            gen_rtx_MULT (word_mode,
                                                          reg, const0_rtx),
                                            reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_MINUS (word_mode,
                                             gen_rtx_MULT (word_mode,
                                                           reg, const0_rtx),
                                             reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
        shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
        shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
        shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[(int) wider_mode]
            = rtx_cost (gen_rtx_MULT (wider_mode,
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                        SET);
          mul_highpart_cost[(int) mode]
            = rtx_cost (gen_rtx_TRUNCATE
                        (mode,
                         gen_rtx_LSHIFTRT (wider_mode,
                                           gen_rtx_MULT (wider_mode,
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg),
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg)),
                                           GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }
    }

  end_sequence ();
}
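
/* A note on the probing idiom above: init_expmed builds scratch insns
   such as (set reg (ashift reg (const_int N))) inside a throwaway
   sequence, substitutes each candidate shift count or multiplier, and
   asks recog whether the target's MD file can match the pattern.  If it
   can, the recorded cost is rtx_cost of the matched SET_SRC; if not, the
   sentinel 32000 marks the operation as effectively unavailable.  For
   instance, on a target with a shift-and-add insn, shiftadd_cost[3] ends
   up as the cost of (plus (mult reg (const_int 8)) reg).  */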
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
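
/* For example, negate_rtx (SImode, const1_rtx) folds immediately to
   (const_int -1) via simplify_unary_operation, while negating a pseudo
   falls through to expand_unop and emits a negation insn (or whatever
   expansion the target provides for neg_optab).  */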
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use
   BITS_PER_WORD; otherwise, we use the mode of operand 3.  */
rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     enum machine_mode fieldmode;
     rtx value;
     unsigned int align;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
#ifdef HAVE_insv
  unsigned HOST_WIDE_INT insv_bitsize;
  enum machine_mode op_mode;

  op_mode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
  if (op_mode == VOIDmode)
    op_mode = word_mode;
  insv_bitsize = GET_MODE_BITSIZE (op_mode);
#endif

  /* It is wrong to have align==0, since every object is aligned at
     least at a bit boundary.  This usually means a bug elsewhere.  */
  if (align == 0)
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  if (bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
          ? (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
             || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, align)
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && align % GET_MODE_BITSIZE (fieldmode) == 0)))
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0))
    {
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (fieldmode, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD));
          else
            op0 = change_address (op0, fieldmode,
                                  plus_constant (XEXP (op0, 0), offset));
        }
      emit_move_insn (op0, value);
      return value;
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = change_address (op0, imode, NULL_RTX);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }
  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
          != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          if (GET_MODE (SUBREG_REG (op0)) == fieldmode
              || GET_MODE_CLASS (fieldmode) == MODE_INT
              || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
            op0 = SUBREG_REG (op0);
          else
            /* Else we've got some float mode source being extracted into
               a different float mode destination -- this combination of
               subregs results in Severe Tire Damage.  */
            abort ();
        }

      emit_insn (GEN_FCN (icode)
                 (gen_rtx_SUBREG (fieldmode, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD)),
                  value));

      return value;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);

          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum,
                                                  (GET_MODE (value) == VOIDmode
                                                   ? fieldmode
                                                   : GET_MODE (value))),
                           align, total_size);
        }
      return value;
    }
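
  /* To make the recursion above concrete: storing a DImode field on a
     32-bit target makes nwords == 2 and calls store_bit_field once per
     word, low-order word first unless WORDS_BIG_ENDIAN (with a non-BLKmode
     value) reverses the order, with bit_offset stepping by
     BITS_PER_WORD.  */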
  /* From here on we can assume that the field to be stored in fits
     within a single word, since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            {
              /* Since this is a destination (lvalue), we can't copy it to a
                 pseudo.  We can trivially remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may have been
                 added above.  Otherwise, abort.  */
              if (GET_CODE (op0) == SUBREG
                  && (GET_MODE_SIZE (GET_MODE (op0))
                      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
                op0 = SUBREG_REG (op0);
              else
                abort ();
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    {
      op0 = protect_from_queue (op0, 1);
    }

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
        value = copy_to_reg (value);
      value = gen_rtx_SUBREG (word_mode, value, 0);
    }
  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

#ifdef HAVE_insv
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (insv_bitsize >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > insv_bitsize)))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode;
      int save_volatile_ok = volatile_ok;

      maxmode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
      if (maxmode == VOIDmode)
        maxmode = word_mode;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, align, maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS (bestmode, align)
                  && GET_MODE_BITSIZE (bestmode) > align))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.  */
          unit = GET_MODE_BITSIZE (bestmode);
          /* Compute offset as multiple of this unit, counting in bytes.  */
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = change_address (op0, bestmode,
                                plus_constant (XEXP (op0, 0), offset));

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           align, total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = change_address (xop0, byte_mode,
                               plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  /* Avoid making subreg of a subreg, or of a mem.  */
                  if (GET_CODE (value1) != REG)
                    value1 = copy_to_reg (value1);
                  value1 = gen_rtx_SUBREG (maxmode, value1, 0);
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (GET_CODE (value) == CONST_INT)
            value1 = GEN_INT (trunc_int_for_mode (INTVAL (value), maxmode));
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
        }
    }
  else
  insv_loses:
#endif
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
  return value;
}
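
/* Note the fallback structure above: if the target defines an insv
   pattern wide enough for the field, we try it first; if gen_insv
   returns 0 or insv is otherwise unsuitable, we fall back to
   store_fixed_bit_field, which in turn punts to store_split_bit_field
   when the field crosses a word (or alignment-unit) boundary.  */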
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have.

   Note that protect_from_queue has already been done on OP0 and VALUE.  */
static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     register rtx value;
     unsigned int struct_align;
{
  register enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  if (! SLOW_UNALIGNED_ACCESS (word_mode, struct_align))
    struct_align = BIGGEST_ALIGNMENT;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos,
                                 value, BITS_PER_WORD);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            struct_align, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0,
                                 bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value, struct_align);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
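
/* The read-modify-write above amounts to, for a non-constant VALUE:

       op0 = (op0 & ~(MASK << bitpos)) | ((value & MASK) << bitpos)

   where MASK has BITSIZE low bits set.  E.g. storing a 4-bit field at
   (little-endian) bit position 8 of an SImode word ANDs with 0xfffff0ff
   and IORs with (value & 0xf) << 8; the all_zero and all_one flags just
   skip the IOR or AND when a constant VALUE makes them no-ops.  */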
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.
   ALIGN is the known alignment of OP0.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */
static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     unsigned HOST_WIDE_INT bitsize, bitpos;
     rtx value;
     unsigned int align;
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.

               ??? We have no idea what the alignment of VALUE is, so
               we have to use a guess.  */
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize,
                 total_bits - bitsize + bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1 : GET_MODE_ALIGNMENT (GET_MODE (value))));
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1 : GET_MODE_ALIGNMENT (GET_MODE (value))));
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
                             thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
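
/* As an example of the loop above: storing a 10-bit field at bit
   position 28 of a word-aligned object on a 32-bit little-endian target
   takes two passes: first the low 4 bits of VALUE go into bits 28..31
   of word 0, then the remaining 6 bits go into bits 0..5 of word 1,
   each pass delegating to store_fixed_bit_field.  */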
/* Generate code to extract a bit-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
                   target, mode, tmode, align, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     unsigned int align;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
#ifdef HAVE_extv
  unsigned HOST_WIDE_INT extv_bitsize;
  enum machine_mode extv_mode;
#endif
#ifdef HAVE_extzv
  unsigned HOST_WIDE_INT extzv_bitsize;
  enum machine_mode extzv_mode;
#endif

#ifdef HAVE_extv
  extv_mode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
  if (extv_mode == VOIDmode)
    extv_mode = word_mode;
  extv_bitsize = GET_MODE_BITSIZE (extv_mode);
#endif

#ifdef HAVE_extzv
  extzv_mode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
  if (extzv_mode == VOIDmode)
    extzv_mode = word_mode;
  extzv_bitsize = GET_MODE_BITSIZE (extzv_mode);
#endif

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_BYTE (op0) / UNITS_PER_WORD;

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
        {
          bitpos += inner_size - outer_size;
          if (bitpos > unit)
            {
              offset += (bitpos / unit);
              bitpos %= unit;
            }
        }

      op0 = SUBREG_REG (op0);
    }

  if (GET_CODE (op0) == REG
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = change_address (op0, imode, NULL_RTX);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }
  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (((GET_CODE (op0) != MEM
        && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op0))))
       || (GET_CODE (op0) == MEM
           && (! SLOW_UNALIGNED_ACCESS (mode, align)
               || (offset * BITS_PER_UNIT % bitsize == 0
                   && align % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
           && bitpos % BITS_PER_WORD == 0)
          || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
              /* ??? The big endian test here is wrong.  This is correct
                 if the value is in a register, and if mode_for_size is not
                 the same mode as op0.  This causes us to get unnecessarily
                 inefficient code from the Thumb port when -mbig-endian.  */
              && (BYTES_BIG_ENDIAN
                  ? bitpos + bitsize == BITS_PER_WORD
                  : bitpos == 0))))
    {
      enum machine_mode mode1
        = (VECTOR_MODE_P (tmode) ? mode
           : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));

      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == mode1
                  || GET_MODE_CLASS (mode1) == MODE_INT
                  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }

          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (mode1, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD));
          else
            op0 = change_address (op0, mode1,
                                  plus_constant (XEXP (op0, 0), offset));
        }

      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode, align, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                {
                  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
                  rtx target_part = operand_subword (target, wordnum, 1,
                                                     VOIDmode);
                  emit_move_insn (target_part, const0_rtx);
                }
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }
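
  /* The shift pair above is the standard sign-extension trick: for a
     40-bit signed field extracted into DImode, shifting left by 24 moves
     the field's sign bit into the register's msb, and the arithmetic
     right shift by 24 then smears it across the upper bits while moving
     the field back to the lsb.  */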
  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
                   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    {
      op0 = protect_from_queue (str_rtx, 1);
    }
  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
          && (extzv_bitsize >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > extzv_bitsize)))
        {
          unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode;

          maxmode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
          if (maxmode == VOIDmode)
            maxmode = word_mode;

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum, align, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
                          && GET_MODE_BITSIZE (bestmode) > align))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1, align);
            }
        }
      else
      extzv_loses:
#endif
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 1, align);
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
          && (extv_bitsize >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > extv_bitsize)))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode;

          maxmode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
          if (maxmode == VOIDmode)
            maxmode = word_mode;

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum, align, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
                          && GET_MODE_BITSIZE (bestmode) > align))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0, align);
            }
        }
      else
      extv_loses:
#endif
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 0, align);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          if (GET_CODE (target) != REG)
            target = copy_to_reg (target);
          return gen_rtx_SUBREG (tmode, target, 0);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
    it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
    (If OP0 is a register, it may be narrower than a full word,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have.  */
static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                         target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     int unsignedp;
     unsigned int align;
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos,
                                        unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT, align,
                            word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
         Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_2 (bitpos, 0);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && GET_CODE (target) == REG
                           && !REG_FUNCTION_VALUE_P (target)
                           ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#if 0
#ifdef SLOW_ZERO_EXTEND
          /* Always generate an `and' if
             we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
             will combine fruitfully with the zero-extend.  */
          || tmode != mode
#endif
#endif
          )
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */

  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
        = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
                       && ! REG_FUNCTION_VALUE_P (target)
                       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                       target, 0);
}
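
/* Concretely, for the signed path above: an 8-bit field at bit 4 of a
   register is extracted by converting to the narrowest integer mode
   that holds bits 0..11 (HImode, say), shifting left by 16 - 12 = 4 so
   the field's msb reaches the mode's msb, then arithmetic-shifting
   right by 16 - 8 = 8, leaving the sign-extended field at the lsb.  */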
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */
static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
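
/* For instance, mask_rtx (SImode, 8, 4, 0) builds the constant
   0x00000f00 (four ones above eight zeros), and with COMPLEMENT nonzero
   it builds 0xfffff0ff instead; store_fixed_bit_field uses the two
   forms to isolate and to clear a field, respectively.  */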
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
1767 /* Extract a bit field that is split across two words
1768 and return an RTX for the result.
1770 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1771 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1772 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
1774 ALIGN is the known alignment of OP0. This is also the size of the
1775 memory objects to be used. */
1777 static rtx
1778 extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
1779 rtx op0;
1780 unsigned HOST_WIDE_INT bitsize, bitpos;
1781 int unsignedp;
1782 unsigned int align;
1784 unsigned int unit;
1785 unsigned int bitsdone = 0;
1786 rtx result = NULL_RTX;
1787 int first = 1;
1789 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
1790 much at a time. */
1791 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1792 unit = BITS_PER_WORD;
1793 else
1794 unit = MIN (align, BITS_PER_WORD);
1796 while (bitsdone < bitsize)
1798 unsigned HOST_WIDE_INT thissize;
1799 rtx part, word;
1800 unsigned HOST_WIDE_INT thispos;
1801 unsigned HOST_WIDE_INT offset;
1803 offset = (bitpos + bitsdone) / unit;
1804 thispos = (bitpos + bitsdone) % unit;
1806 /* THISSIZE must not overrun a word boundary. Otherwise,
1807 extract_fixed_bit_field will call us again, and we will mutually
1808 recurse forever. */
1809 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1810 thissize = MIN (thissize, unit - thispos);
1812 /* If OP0 is a register, then handle OFFSET here.
1814 When handling multiword bitfields, extract_bit_field may pass
1815 down a word_mode SUBREG of a larger REG for a bitfield that actually
1816 crosses a word boundary. Thus, for a SUBREG, we must find
1817 the current word starting from the base register. */
1818 if (GET_CODE (op0) == SUBREG)
1820 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1821 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1822 GET_MODE (SUBREG_REG (op0)));
1823 offset = 0;
1825 else if (GET_CODE (op0) == REG)
1827 word = operand_subword_force (op0, offset, GET_MODE (op0));
1828 offset = 0;
1830 else
1831 word = op0;
1833 /* Extract the parts in bit-counting order,
1834 whose meaning is determined by BYTES_PER_UNIT.
1835 OFFSET is in UNITs, and UNIT is in bits.
1836 extract_fixed_bit_field wants offset in bytes. */
1837 part = extract_fixed_bit_field (word_mode, word,
1838 offset * unit / BITS_PER_UNIT,
1839 thissize, thispos, 0, 1, align);
1840 bitsdone += thissize;
1842 /* Shift this part into place for the result. */
1843 if (BYTES_BIG_ENDIAN)
1845 if (bitsize != bitsdone)
1846 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1847 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1849 else
1851 if (bitsdone != thissize)
1852 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1853 build_int_2 (bitsdone - thissize, 0), 0, 1);
1856 if (first)
1857 result = part;
1858 else
1859 /* Combine the parts with bitwise or. This works
1860 because we extracted each part as an unsigned bit field. */
1861 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1862 OPTAB_LIB_WIDEN);
1864 first = 0;
1867 /* Unsigned bit field: we are done. */
1868 if (unsignedp)
1869 return result;
1870 /* Signed bit field: sign-extend with two arithmetic shifts. */
1871 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1872 build_int_2 (BITS_PER_WORD - bitsize, 0),
1873 NULL_RTX, 0);
1874 return expand_shift (RSHIFT_EXPR, word_mode, result,
1875 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
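/* Illustrative sketch, not compiler code: the loop above specialized to
   an unsigned field spanning two 32-bit little-endian words. The low
   part comes from WORD[0] starting at BITPOS, the rest from the bottom
   of WORD[1], shifted into place and combined with IOR just as PART
   and RESULT are combined above. Assumes 0 < bitsize <= 32 and
   0 < bitpos < 32. */

static unsigned int
extract_split_32 (word, bitsize, bitpos)
     unsigned int *word;
     int bitsize, bitpos;
{
  int lowbits = 32 - bitpos;		/* bits available in word[0] */
  unsigned int part0 = word[0] >> bitpos;
  unsigned int part1;

  if (bitsize <= lowbits)
    return part0 & ((1U << bitsize) - 1);
  part1 = word[1] & ((1U << (bitsize - lowbits)) - 1);
  return part0 | (part1 << lowbits);
}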
1878 /* Add INC into TARGET. */
1880 void
1881 expand_inc (target, inc)
1882 rtx target, inc;
1884 rtx value = expand_binop (GET_MODE (target), add_optab,
1885 target, inc,
1886 target, 0, OPTAB_LIB_WIDEN);
1887 if (value != target)
1888 emit_move_insn (target, value);
1891 /* Subtract DEC from TARGET. */
1893 void
1894 expand_dec (target, dec)
1895 rtx target, dec;
1897 rtx value = expand_binop (GET_MODE (target), sub_optab,
1898 target, dec,
1899 target, 0, OPTAB_LIB_WIDEN);
1900 if (value != target)
1901 emit_move_insn (target, value);
1904 /* Output a shift instruction for expression code CODE,
1905 with SHIFTED being the rtx for the value to shift,
1906 and AMOUNT the tree for the amount to shift by.
1907 Store the result in the rtx TARGET, if that is convenient.
1908 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1909 Return the rtx for where the value is. */
1912 expand_shift (code, mode, shifted, amount, target, unsignedp)
1913 enum tree_code code;
1914 register enum machine_mode mode;
1915 rtx shifted;
1916 tree amount;
1917 register rtx target;
1918 int unsignedp;
1920 register rtx op1, temp = 0;
1921 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1922 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1923 int try;
1925 /* Previously, this code detected shift counts computed by NEGATE_EXPR
1926 and shifted in the other direction instead; but that does not work
1927 on all machines. */
1929 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1931 #ifdef SHIFT_COUNT_TRUNCATED
1932 if (SHIFT_COUNT_TRUNCATED)
1934 if (GET_CODE (op1) == CONST_INT
1935 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1936 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1937 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1938 % GET_MODE_BITSIZE (mode));
1939 else if (GET_CODE (op1) == SUBREG
1940 && SUBREG_BYTE (op1) == 0)
1941 op1 = SUBREG_REG (op1);
1943 #endif
1945 if (op1 == const0_rtx)
1946 return shifted;
1948 for (try = 0; temp == 0 && try < 3; try++)
1950 enum optab_methods methods;
1952 if (try == 0)
1953 methods = OPTAB_DIRECT;
1954 else if (try == 1)
1955 methods = OPTAB_WIDEN;
1956 else
1957 methods = OPTAB_LIB_WIDEN;
1959 if (rotate)
1961 /* Widening does not work for rotation. */
1962 if (methods == OPTAB_WIDEN)
1963 continue;
1964 else if (methods == OPTAB_LIB_WIDEN)
1966 /* If we have been unable to open-code this by a rotation,
1967 do it as the IOR of two shifts. I.e., to rotate A
1968 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1969 where C is the bitsize of A.
1971 It is theoretically possible that the target machine might
1972 not be able to perform either shift and hence we would
1973 be making two libcalls rather than just the one for the
1974 shift (similarly if IOR could not be done). We will allow
1975 this extremely unlikely lossage to avoid complicating the
1976 code below. */
1978 rtx subtarget = target == shifted ? 0 : target;
1979 rtx temp1;
1980 tree type = TREE_TYPE (amount);
1981 tree new_amount = make_tree (type, op1);
1982 tree other_amount
1983 = fold (build (MINUS_EXPR, type,
1984 convert (type,
1985 build_int_2 (GET_MODE_BITSIZE (mode),
1986 0)),
1987 amount));
1989 shifted = force_reg (mode, shifted);
1991 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1992 mode, shifted, new_amount, subtarget, 1);
1993 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1994 mode, shifted, other_amount, 0, 1);
1995 return expand_binop (mode, ior_optab, temp, temp1, target,
1996 unsignedp, methods);
1999 temp = expand_binop (mode,
2000 left ? rotl_optab : rotr_optab,
2001 shifted, op1, target, unsignedp, methods);
2003 /* If we don't have the rotate, but we are rotating by a constant
2004 that is in range, try a rotate in the opposite direction. */
2006 if (temp == 0 && GET_CODE (op1) == CONST_INT
2007 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
2008 temp = expand_binop (mode,
2009 left ? rotr_optab : rotl_optab,
2010 shifted,
2011 GEN_INT (GET_MODE_BITSIZE (mode)
2012 - INTVAL (op1)),
2013 target, unsignedp, methods);
2015 else if (unsignedp)
2016 temp = expand_binop (mode,
2017 left ? ashl_optab : lshr_optab,
2018 shifted, op1, target, unsignedp, methods);
2020 /* Do arithmetic shifts.
2021 Also, if we are going to widen the operand, we can just as well
2022 use an arithmetic right-shift instead of a logical one. */
2023 if (temp == 0 && ! rotate
2024 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2026 enum optab_methods methods1 = methods;
2028 /* If trying to widen a logical shift to an arithmetic shift,
2029 don't accept an arithmetic shift of the same size. */
2030 if (unsignedp)
2031 methods1 = OPTAB_MUST_WIDEN;
2033 /* Arithmetic shift */
2035 temp = expand_binop (mode,
2036 left ? ashl_optab : ashr_optab,
2037 shifted, op1, target, unsignedp, methods1);
2040 /* We used to try extzv here for logical right shifts, but that was
2041 only useful for one machine, the VAX, and caused poor code
2042 generation there for lshrdi3, so the code was deleted and a
2043 define_expand for lshrsi3 was added to vax.md. */
2046 if (temp == 0)
2047 abort ();
2048 return temp;
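/* Illustrative sketch, not compiler code: the IOR-of-two-shifts rotate
   fallback above, written out for a 32-bit left rotate. A must be
   unsigned so that the right shift is logical, and N must satisfy
   0 < N < 32 (a zero shift count is returned unchanged earlier). */

static unsigned int
rotate_left_32 (a, n)
     unsigned int a;
     int n;
{
  return (a << n) | (a >> (32 - n));
}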
2051 enum alg_code { alg_zero, alg_m, alg_shift,
2052 alg_add_t_m2, alg_sub_t_m2,
2053 alg_add_factor, alg_sub_factor,
2054 alg_add_t2_m, alg_sub_t2_m,
2055 alg_add, alg_subtract, alg_factor, alg_shiftop };
2057 /* This structure records a sequence of operations.
2058 `ops' is the number of operations recorded.
2059 `cost' is their total cost.
2060 The operations are stored in `op' and the corresponding
2061 logarithms of the integer coefficients in `log'.
2063 These are the operations:
2064 alg_zero total := 0;
2065 alg_m total := multiplicand;
2066 alg_shift total := total * coeff
2067 alg_add_t_m2 total := total + multiplicand * coeff;
2068 alg_sub_t_m2 total := total - multiplicand * coeff;
2069 alg_add_factor total := total * coeff + total;
2070 alg_sub_factor total := total * coeff - total;
2071 alg_add_t2_m total := total * coeff + multiplicand;
2072 alg_sub_t2_m total := total * coeff - multiplicand;
2074 The first operand must be either alg_zero or alg_m. */
2076 struct algorithm
2078 short cost;
2079 short ops;
2080 /* The sizes of the OP and LOG fields are not directly related to the
2081 word size, but the worst case arises when we have few
2082 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2083 In that case we will generate shift-by-2, add, shift-by-2, add, ...,
2084 wordsize operations in total. */
2085 enum alg_code op[MAX_BITS_PER_WORD];
2086 char log[MAX_BITS_PER_WORD];
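/* Worked example (illustrative, not compiler code): one sequence of the
   operations above that reaches a multiplication by 10 is
     alg_m          total := x
     alg_add_t_m2   total := total + (x << 2)  [log == 2]  -> 5x
     alg_shift      total := total << 1        [log == 1]  -> 10x
   costing one shift-add and one shift instead of a multiply: */

static int
times_ten (x)
     int x;
{
  int total = x;		/* alg_m */

  total += x << 2;		/* alg_add_t_m2, log == 2 */
  return total << 1;		/* alg_shift, log == 1 */
}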
2089 static void synth_mult PARAMS ((struct algorithm *,
2090 unsigned HOST_WIDE_INT,
2091 int));
2092 static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
2093 int, int,
2094 unsigned HOST_WIDE_INT *,
2095 int *, int *));
2096 static unsigned HOST_WIDE_INT invert_mod2n PARAMS ((unsigned HOST_WIDE_INT,
2097 int));
2098 /* Compute and return the best algorithm for multiplying by T.
2099 The algorithm must cost less than COST_LIMIT.
2100 If retval.cost >= COST_LIMIT, no algorithm was found and all
2101 other fields of the returned struct are undefined. */
2103 static void
2104 synth_mult (alg_out, t, cost_limit)
2105 struct algorithm *alg_out;
2106 unsigned HOST_WIDE_INT t;
2107 int cost_limit;
2109 int m;
2110 struct algorithm *alg_in, *best_alg;
2111 int cost;
2112 unsigned HOST_WIDE_INT q;
2114 /* Indicate that no algorithm has been found yet. If no algorithm
2115 is found, this value will be returned and indicates failure. */
2116 alg_out->cost = cost_limit;
2118 if (cost_limit <= 0)
2119 return;
2121 /* t == 1 can be done at zero cost. */
2122 if (t == 1)
2124 alg_out->ops = 1;
2125 alg_out->cost = 0;
2126 alg_out->op[0] = alg_m;
2127 return;
2130 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2131 fail now. */
2132 if (t == 0)
2134 if (zero_cost >= cost_limit)
2135 return;
2136 else
2138 alg_out->ops = 1;
2139 alg_out->cost = zero_cost;
2140 alg_out->op[0] = alg_zero;
2141 return;
2145 /* We'll need a couple of extra algorithm structures now. */
2147 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2148 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2150 /* If we have a group of zero bits at the low-order part of T, try
2151 multiplying by the remaining bits and then doing a shift. */
2153 if ((t & 1) == 0)
2155 m = floor_log2 (t & -t); /* m = number of low zero bits */
2156 if (m < BITS_PER_WORD)
2158 q = t >> m;
2159 cost = shift_cost[m];
2160 synth_mult (alg_in, q, cost_limit - cost);
2162 cost += alg_in->cost;
2163 if (cost < cost_limit)
2165 struct algorithm *x;
2166 x = alg_in, alg_in = best_alg, best_alg = x;
2167 best_alg->log[best_alg->ops] = m;
2168 best_alg->op[best_alg->ops] = alg_shift;
2169 cost_limit = cost;
2174 /* If we have an odd number, add or subtract one. */
2175 if ((t & 1) != 0)
2177 unsigned HOST_WIDE_INT w;
2179 for (w = 1; (w & t) != 0; w <<= 1)
2181 /* If T was -1, then W will be zero after the loop. This is another
2182 case where T ends with ...111. Handling this as (T + 1) minus 1
2183 produces slightly better code and makes algorithm
2184 selection much faster than treating it like the ...0111 case
2185 below. */
2186 if (w == 0
2187 || (w > 2
2188 /* Reject the case where t is 3.
2189 Thus we prefer addition in that case. */
2190 && t != 3))
2192 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2194 cost = add_cost;
2195 synth_mult (alg_in, t + 1, cost_limit - cost);
2197 cost += alg_in->cost;
2198 if (cost < cost_limit)
2200 struct algorithm *x;
2201 x = alg_in, alg_in = best_alg, best_alg = x;
2202 best_alg->log[best_alg->ops] = 0;
2203 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2204 cost_limit = cost;
2207 else
2209 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2211 cost = add_cost;
2212 synth_mult (alg_in, t - 1, cost_limit - cost);
2214 cost += alg_in->cost;
2215 if (cost < cost_limit)
2217 struct algorithm *x;
2218 x = alg_in, alg_in = best_alg, best_alg = x;
2219 best_alg->log[best_alg->ops] = 0;
2220 best_alg->op[best_alg->ops] = alg_add_t_m2;
2221 cost_limit = cost;
2226 /* Look for factors of t of the form
2227 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2228 If we find such a factor, we can multiply by t using an algorithm that
2229 multiplies by q, shifts the result by m, and adds/subtracts the unshifted value.
2231 We search for large factors first and loop down, even if large factors
2232 are less probable than small; if we find a large factor we will find a
2233 good sequence quickly, and therefore be able to prune (by decreasing
2234 COST_LIMIT) the search. */
2236 for (m = floor_log2 (t - 1); m >= 2; m--)
2238 unsigned HOST_WIDE_INT d;
2240 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2241 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2243 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2244 synth_mult (alg_in, t / d, cost_limit - cost);
2246 cost += alg_in->cost;
2247 if (cost < cost_limit)
2249 struct algorithm *x;
2250 x = alg_in, alg_in = best_alg, best_alg = x;
2251 best_alg->log[best_alg->ops] = m;
2252 best_alg->op[best_alg->ops] = alg_add_factor;
2253 cost_limit = cost;
2255 /* Other factors will have been taken care of in the recursion. */
2256 break;
2259 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2260 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2262 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2263 synth_mult (alg_in, t / d, cost_limit - cost);
2265 cost += alg_in->cost;
2266 if (cost < cost_limit)
2268 struct algorithm *x;
2269 x = alg_in, alg_in = best_alg, best_alg = x;
2270 best_alg->log[best_alg->ops] = m;
2271 best_alg->op[best_alg->ops] = alg_sub_factor;
2272 cost_limit = cost;
2274 break;
2278 /* Try shift-and-add (load effective address) instructions,
2279 i.e. do a*3, a*5, a*9. */
2280 if ((t & 1) != 0)
2282 q = t - 1;
2283 q = q & -q;
2284 m = exact_log2 (q);
2285 if (m >= 0 && m < BITS_PER_WORD)
2287 cost = shiftadd_cost[m];
2288 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2290 cost += alg_in->cost;
2291 if (cost < cost_limit)
2293 struct algorithm *x;
2294 x = alg_in, alg_in = best_alg, best_alg = x;
2295 best_alg->log[best_alg->ops] = m;
2296 best_alg->op[best_alg->ops] = alg_add_t2_m;
2297 cost_limit = cost;
2301 q = t + 1;
2302 q = q & -q;
2303 m = exact_log2 (q);
2304 if (m >= 0 && m < BITS_PER_WORD)
2306 cost = shiftsub_cost[m];
2307 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2309 cost += alg_in->cost;
2310 if (cost < cost_limit)
2312 struct algorithm *x;
2313 x = alg_in, alg_in = best_alg, best_alg = x;
2314 best_alg->log[best_alg->ops] = m;
2315 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2316 cost_limit = cost;
2321 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2322 we have not found any algorithm. */
2323 if (cost_limit == alg_out->cost)
2324 return;
2326 /* If the sequence is too long for `struct algorithm'
2327 to record, make this search fail. */
2328 if (best_alg->ops == MAX_BITS_PER_WORD)
2329 return;
2331 /* Copy the algorithm from temporary space to the space at alg_out.
2332 We avoid using structure assignment because the majority of
2333 best_alg is normally undefined, and this is a critical function. */
2334 alg_out->ops = best_alg->ops + 1;
2335 alg_out->cost = cost_limit;
2336 memcpy (alg_out->op, best_alg->op,
2337 alg_out->ops * sizeof *alg_out->op);
2338 memcpy (alg_out->log, best_alg->log,
2339 alg_out->ops * sizeof *alg_out->log);
2342 /* Perform a multiplication and return an rtx for the result.
2343 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2344 TARGET is a suggestion for where to store the result (an rtx).
2346 We check specially for a constant integer as OP1.
2347 If you want this check for OP0 as well, then before calling
2348 you should swap the two operands if OP0 would be constant. */
2351 expand_mult (mode, op0, op1, target, unsignedp)
2352 enum machine_mode mode;
2353 register rtx op0, op1, target;
2354 int unsignedp;
2356 rtx const_op1 = op1;
2358 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2359 less than or equal in size to `unsigned int' this doesn't matter.
2360 If the mode is larger than `unsigned int', then synth_mult works only
2361 if the constant value exactly fits in an `unsigned int' without any
2362 truncation. This means that multiplying by negative values does
2363 not work; results are off by 2^32 on a 32 bit machine. */
2365 /* If we are multiplying in DImode, it may still be a win
2366 to try to work with shifts and adds. */
2367 if (GET_CODE (op1) == CONST_DOUBLE
2368 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2369 && HOST_BITS_PER_INT >= BITS_PER_WORD
2370 && CONST_DOUBLE_HIGH (op1) == 0)
2371 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2372 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2373 && GET_CODE (op1) == CONST_INT
2374 && INTVAL (op1) < 0)
2375 const_op1 = 0;
2377 /* We used to test optimize here, on the grounds that it's better to
2378 produce a smaller program when -O is not used.
2379 But this causes such a terrible slowdown sometimes
2380 that it seems better to use synth_mult always. */
2382 if (const_op1 && GET_CODE (const_op1) == CONST_INT
2383 && (unsignedp || ! flag_trapv))
2385 struct algorithm alg;
2386 struct algorithm alg2;
2387 HOST_WIDE_INT val = INTVAL (op1);
2388 HOST_WIDE_INT val_so_far;
2389 rtx insn;
2390 int mult_cost;
2391 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2393 /* Try to do the computation three ways: multiply by the negative of OP1
2394 and then negate, do the multiplication directly, or do multiplication
2395 by OP1 - 1. */
2397 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2398 mult_cost = MIN (12 * add_cost, mult_cost);
2400 synth_mult (&alg, val, mult_cost);
2402 /* This works only if the inverted value actually fits in an
2403 `unsigned int'. */
2404 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2406 synth_mult (&alg2, - val,
2407 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2408 if (alg2.cost + negate_cost < alg.cost)
2409 alg = alg2, variant = negate_variant;
2412 /* This proves very useful for division-by-constant. */
2413 synth_mult (&alg2, val - 1,
2414 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2415 if (alg2.cost + add_cost < alg.cost)
2416 alg = alg2, variant = add_variant;
2418 if (alg.cost < mult_cost)
2420 /* We found something cheaper than a multiply insn. */
2421 int opno;
2422 rtx accum, tem;
2423 enum machine_mode nmode;
2425 op0 = protect_from_queue (op0, 0);
2427 /* Avoid referencing memory over and over.
2428 For speed, but also for correctness when mem is volatile. */
2429 if (GET_CODE (op0) == MEM)
2430 op0 = force_reg (mode, op0);
2432 /* ACCUM starts out either as OP0 or as a zero, depending on
2433 the first operation. */
2435 if (alg.op[0] == alg_zero)
2437 accum = copy_to_mode_reg (mode, const0_rtx);
2438 val_so_far = 0;
2440 else if (alg.op[0] == alg_m)
2442 accum = copy_to_mode_reg (mode, op0);
2443 val_so_far = 1;
2445 else
2446 abort ();
2448 for (opno = 1; opno < alg.ops; opno++)
2450 int log = alg.log[opno];
2451 int preserve = preserve_subexpressions_p ();
2452 rtx shift_subtarget = preserve ? 0 : accum;
2453 rtx add_target
2454 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2455 && ! preserve)
2456 ? target : 0;
2457 rtx accum_target = preserve ? 0 : accum;
2459 switch (alg.op[opno])
2461 case alg_shift:
2462 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2463 build_int_2 (log, 0), NULL_RTX, 0);
2464 val_so_far <<= log;
2465 break;
2467 case alg_add_t_m2:
2468 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2469 build_int_2 (log, 0), NULL_RTX, 0);
2470 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2471 add_target
2472 ? add_target : accum_target);
2473 val_so_far += (HOST_WIDE_INT) 1 << log;
2474 break;
2476 case alg_sub_t_m2:
2477 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2478 build_int_2 (log, 0), NULL_RTX, 0);
2479 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2480 add_target
2481 ? add_target : accum_target);
2482 val_so_far -= (HOST_WIDE_INT) 1 << log;
2483 break;
2485 case alg_add_t2_m:
2486 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2487 build_int_2 (log, 0), shift_subtarget,
2489 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2490 add_target
2491 ? add_target : accum_target);
2492 val_so_far = (val_so_far << log) + 1;
2493 break;
2495 case alg_sub_t2_m:
2496 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2497 build_int_2 (log, 0), shift_subtarget,
2499 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2500 add_target
2501 ? add_target : accum_target);
2502 val_so_far = (val_so_far << log) - 1;
2503 break;
2505 case alg_add_factor:
2506 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2507 build_int_2 (log, 0), NULL_RTX, 0);
2508 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2509 add_target
2510 ? add_target : accum_target);
2511 val_so_far += val_so_far << log;
2512 break;
2514 case alg_sub_factor:
2515 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2516 build_int_2 (log, 0), NULL_RTX, 0);
2517 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2518 (add_target ? add_target
2519 : preserve ? 0 : tem));
2520 val_so_far = (val_so_far << log) - val_so_far;
2521 break;
2523 default:
2524 abort ();
2527 /* Write a REG_EQUAL note on the last insn so that we can cse
2528 multiplication sequences. Note that if ACCUM is a SUBREG,
2529 we've set the inner register and must properly indicate
2530 that. */
2532 tem = op0, nmode = mode;
2533 if (GET_CODE (accum) == SUBREG)
2535 nmode = GET_MODE (SUBREG_REG (accum));
2536 tem = gen_lowpart (nmode, op0);
2539 insn = get_last_insn ();
2540 set_unique_reg_note (insn,
2541 REG_EQUAL,
2542 gen_rtx_MULT (nmode, tem,
2543 GEN_INT (val_so_far)));
2546 if (variant == negate_variant)
2548 val_so_far = - val_so_far;
2549 accum = expand_unop (mode, neg_optab, accum, target, 0);
2551 else if (variant == add_variant)
2553 val_so_far = val_so_far + 1;
2554 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2557 if (val != val_so_far)
2558 abort ();
2560 return accum;
2564 /* This used to use umul_optab if unsigned, but for non-widening multiply
2565 there is no difference between signed and unsigned. */
2566 op0 = expand_binop (mode,
2567 ! unsignedp
2568 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2569 ? smulv_optab : smul_optab,
2570 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2571 if (op0 == 0)
2572 abort ();
2573 return op0;
2576 /* Return the smallest n such that 2**n >= X. */
2579 ceil_log2 (x)
2580 unsigned HOST_WIDE_INT x;
2582 return floor_log2 (x - 1) + 1;
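/* E.g. ceil_log2 (5) == 3 and ceil_log2 (8) == 3, since 2**3 is the
   smallest power of two that is >= either value. */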
2585 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2586 replace division by D; put the least significant N bits of the result
2587 in *MULTIPLIER_PTR and return the most significant bit.
2589 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT); the
2590 needed precision is in PRECISION (should be <= N).
2592 PRECISION should be as small as possible so that this function can choose
2593 the multiplier more freely.
2595 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2596 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2598 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2599 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
2601 static
2602 unsigned HOST_WIDE_INT
2603 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2604 unsigned HOST_WIDE_INT d;
2605 int n;
2606 int precision;
2607 unsigned HOST_WIDE_INT *multiplier_ptr;
2608 int *post_shift_ptr;
2609 int *lgup_ptr;
2611 HOST_WIDE_INT mhigh_hi, mlow_hi;
2612 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2613 int lgup, post_shift;
2614 int pow, pow2;
2615 unsigned HOST_WIDE_INT nl, dummy1;
2616 HOST_WIDE_INT nh, dummy2;
2618 /* lgup = ceil(log2(divisor)); */
2619 lgup = ceil_log2 (d);
2621 if (lgup > n)
2622 abort ();
2624 pow = n + lgup;
2625 pow2 = n + lgup - precision;
2627 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2629 /* We could handle this with some effort, but this case is much better
2630 handled directly with a scc insn, so rely on caller using that. */
2631 abort ();
2634 /* mlow = 2^(N + lgup)/d */
2635 if (pow >= HOST_BITS_PER_WIDE_INT)
2637 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2638 nl = 0;
2640 else
2642 nh = 0;
2643 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2645 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2646 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2648 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2649 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2650 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2651 else
2652 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2653 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2654 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2656 if (mhigh_hi && nh - d >= d)
2657 abort ();
2658 if (mhigh_hi > 1 || mlow_hi > 1)
2659 abort ();
2660 /* assert that mlow < mhigh. */
2661 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2662 abort ();
2664 /* If precision == N, then mlow, mhigh exceed 2^N
2665 (but they do not exceed 2^(N+1)). */
2667 /* Reduce to lowest terms */
2668 for (post_shift = lgup; post_shift > 0; post_shift--)
2670 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2671 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2672 if (ml_lo >= mh_lo)
2673 break;
2675 mlow_hi = 0;
2676 mlow_lo = ml_lo;
2677 mhigh_hi = 0;
2678 mhigh_lo = mh_lo;
2681 *post_shift_ptr = post_shift;
2682 *lgup_ptr = lgup;
2683 if (n < HOST_BITS_PER_WIDE_INT)
2685 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2686 *multiplier_ptr = mhigh_lo & mask;
2687 return mhigh_lo >= mask;
2689 else
2691 *multiplier_ptr = mhigh_lo;
2692 return mhigh_hi;
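/* Worked example (illustrative, not compiler code): for N == 32 and
   D == 7, the function above chooses the 33-bit multiplier
   2^32 + 0x24924925 with a post-shift of 3. The returned msb is set,
   so a caller such as expand_divmod below must use the add-and-halve
   sequence. The 64-bit `unsigned long long' is a host assumption. */

static unsigned int
udiv7 (n)
     unsigned int n;
{
  unsigned int hi = ((unsigned long long) n * 0x24924925u) >> 32;

  /* q = (((n - hi) >> 1) + hi) >> (post_shift - 1), post_shift == 3. */
  return (((n - hi) >> 1) + hi) >> 2;
}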
2696 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2697 congruent to 1 (mod 2**N). */
2699 static unsigned HOST_WIDE_INT
2700 invert_mod2n (x, n)
2701 unsigned HOST_WIDE_INT x;
2702 int n;
2704 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2706 /* The algorithm notes that the choice y = x satisfies
2707 x*y == 1 mod 2^3, since x is assumed odd.
2708 Each iteration doubles the number of bits of significance in y. */
2710 unsigned HOST_WIDE_INT mask;
2711 unsigned HOST_WIDE_INT y = x;
2712 int nbit = 3;
2714 mask = (n == HOST_BITS_PER_WIDE_INT
2715 ? ~(unsigned HOST_WIDE_INT) 0
2716 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2718 while (nbit < n)
2720 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2721 nbit *= 2;
2723 return y;
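/* Worked example (illustrative, not compiler code): for x == 3 and
   n == 32 the iteration above yields y == 0xAAAAAAAB, and indeed
   3 * 0xAAAAAAAB == 0x200000001, whose low 32 bits are 1. The number
   of correct low-order bits doubles each step: 3, 6, 12, 24, 48. */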
2726 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2727 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2728 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2729 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
2730 become signed.
2732 The result is put in TARGET if that is convenient.
2734 MODE is the mode of operation. */
2737 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2738 enum machine_mode mode;
2739 register rtx adj_operand, op0, op1, target;
2740 int unsignedp;
2742 rtx tem;
2743 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2745 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2746 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2747 NULL_RTX, 0);
2748 tem = expand_and (tem, op1, NULL_RTX);
2749 adj_operand
2750 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2751 adj_operand);
2753 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2754 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2755 NULL_RTX, 0);
2756 tem = expand_and (tem, op0, NULL_RTX);
2757 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2758 target);
2760 return target;
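/* Illustrative sketch, not compiler code: the adjustment above for
   32-bit operands, in the direction from signed to unsigned high part.
   (A >> 31) is all ones exactly when A is negative, so each AND term
   adds the other operand only in that case:
     mulhu (a, b) == mulhs (a, b) + (a < 0 ? b : 0) + (b < 0 ? a : 0)
   with everything taken mod 2^32. Assumes a 64-bit `long long' and
   arithmetic right shift of signed values on the host. */

static unsigned int
mulhu_from_mulhs (a, b)
     int a, b;
{
  unsigned int hs = ((long long) a * b) >> 32;	/* signed high part */

  return hs + ((a >> 31) & b) + ((b >> 31) & a);
}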
2763 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2764 in TARGET if that is convenient, and return where the result is. If the
2765 operation cannot be performed, 0 is returned.
2767 MODE is the mode of operation and result.
2769 UNSIGNEDP nonzero means unsigned multiply.
2771 MAX_COST is the total allowed cost for the expanded RTL. */
2774 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2775 enum machine_mode mode;
2776 register rtx op0, target;
2777 unsigned HOST_WIDE_INT cnst1;
2778 int unsignedp;
2779 int max_cost;
2781 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2782 optab mul_highpart_optab;
2783 optab moptab;
2784 rtx tem;
2785 int size = GET_MODE_BITSIZE (mode);
2786 rtx op1, wide_op1;
2788 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2789 if (size > HOST_BITS_PER_WIDE_INT)
2790 abort ();
2792 op1 = GEN_INT (trunc_int_for_mode (cnst1, mode));
2794 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2795 wide_op1 = op1;
2796 else
2797 wide_op1
2798 = immed_double_const (cnst1,
2799 (unsignedp
2800 ? (HOST_WIDE_INT) 0
2801 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2802 wider_mode);
2804 /* expand_mult handles constant multiplication of word_mode
2805 or narrower. It does a poor job for large modes. */
2806 if (size < BITS_PER_WORD
2807 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2809 /* We have to do this, since expand_binop doesn't do conversion for
2810 multiply. Maybe change expand_binop to handle widening multiply? */
2811 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2813 /* We know that this can't have signed overflow, so pretend this is
2814 an unsigned multiply. */
2815 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
2816 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2817 build_int_2 (size, 0), NULL_RTX, 1);
2818 return convert_modes (mode, wider_mode, tem, unsignedp);
2821 if (target == 0)
2822 target = gen_reg_rtx (mode);
2824 /* Firstly, try using a multiplication insn that only generates the needed
2825 high part of the product, and in the sign flavor of unsignedp. */
2826 if (mul_highpart_cost[(int) mode] < max_cost)
2828 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2829 target = expand_binop (mode, mul_highpart_optab,
2830 op0, op1, target, unsignedp, OPTAB_DIRECT);
2831 if (target)
2832 return target;
2835 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2836 Need to adjust the result after the multiplication. */
2837 if (size - 1 < BITS_PER_WORD
2838 && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
2839 < max_cost))
2841 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2842 target = expand_binop (mode, mul_highpart_optab,
2843 op0, op1, target, unsignedp, OPTAB_DIRECT);
2844 if (target)
2845 /* We used the wrong signedness. Adjust the result. */
2846 return expand_mult_highpart_adjust (mode, target, op0,
2847 op1, target, unsignedp);
2850 /* Try widening multiplication. */
2851 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2852 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2853 && mul_widen_cost[(int) wider_mode] < max_cost)
2855 op1 = force_reg (mode, op1);
2856 goto try;
2859 /* Try widening the mode and performing a non-widening multiplication. */
2860 moptab = smul_optab;
2861 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2862 && size - 1 < BITS_PER_WORD
2863 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2865 op1 = wide_op1;
2866 goto try;
2869 /* Try widening multiplication of opposite signedness, and adjust. */
2870 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2871 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2872 && size - 1 < BITS_PER_WORD
2873 && (mul_widen_cost[(int) wider_mode]
2874 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2876 rtx regop1 = force_reg (mode, op1);
2877 tem = expand_binop (wider_mode, moptab, op0, regop1,
2878 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2879 if (tem != 0)
2881 /* Extract the high half of the just generated product. */
2882 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2883 build_int_2 (size, 0), NULL_RTX, 1);
2884 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2885 /* We used the wrong signedness. Adjust the result. */
2886 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2887 target, unsignedp);
2891 return 0;
2893 try:
2894 /* Pass NULL_RTX as target since TARGET has the wrong mode. */
2895 tem = expand_binop (wider_mode, moptab, op0, op1,
2896 NULL_RTX, unsignedp, OPTAB_WIDEN);
2897 if (tem == 0)
2898 return 0;
2900 /* Extract the high half of the just generated product. */
2901 if (mode == word_mode)
2903 return gen_highpart (mode, tem);
2905 else
2907 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2908 build_int_2 (size, 0), NULL_RTX, 1);
2909 return convert_modes (mode, wider_mode, tem, unsignedp);
2913 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2914 if that is convenient, and returning where the result is.
2915 You may request either the quotient or the remainder as the result;
2916 specify REM_FLAG nonzero to get the remainder.
2918 CODE is the expression code for which kind of division this is;
2919 it controls how rounding is done. MODE is the machine mode to use.
2920 UNSIGNEDP nonzero means do unsigned division. */
2922 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2923 and then correct it by or'ing in missing high bits
2924 if result of ANDI is nonzero.
2925 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2926 This could optimize to a bfexts instruction.
2927 But C doesn't use these operations, so their optimizations are
2928 left for later. */
2929 /* ??? For modulo, we don't actually need the highpart of the first product;
2930 the low part will do nicely. And for small divisors, the second multiply
2931 can also be a low-part only multiply or even be completely left out.
2932 E.g. to calculate the remainder of a division by 3 with a 32 bit
2933 multiply, multiply with 0x55555556 and extract the upper two bits;
2934 the result is exact for inputs up to 0x1fffffff.
2935 The input range can be reduced by using cross-sum rules.
2936 For odd divisors >= 3, the following table gives right shift counts
2937 so that if a number is shifted by an integer multiple of the given
2938 amount, the remainder stays the same:
2939 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2940 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2941 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2942 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2943 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2945 Cross-sum rules for even numbers can be derived by leaving as many bits
2946 to the right alone as the divisor has zeros to the right.
2947 E.g. if x is an unsigned 32 bit number:
2948 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28 */
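/* Worked example of the remainder-by-3 trick above (illustrative, not
   compiler code): 0x55555556 == (2^32 + 2) / 3, so the low 32 bits of
   X * 0x55555556 approximate frac(X/3) * 2^32, and their top two bits
   are X mod 3. As noted above, this is exact only for inputs up to
   0x1fffffff. */

static unsigned int
rem3 (x)
     unsigned int x;
{
  return (x * 0x55555556u) >> 30;
}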
2951 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
2954 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2955 int rem_flag;
2956 enum tree_code code;
2957 enum machine_mode mode;
2958 register rtx op0, op1, target;
2959 int unsignedp;
2961 enum machine_mode compute_mode;
2962 register rtx tquotient;
2963 rtx quotient = 0, remainder = 0;
2964 rtx last;
2965 int size;
2966 rtx insn, set;
2967 optab optab1, optab2;
2968 int op1_is_constant, op1_is_pow2;
2969 int max_cost, extra_cost;
2970 static HOST_WIDE_INT last_div_const = 0;
2972 op1_is_constant = GET_CODE (op1) == CONST_INT;
2973 op1_is_pow2 = (op1_is_constant
2974 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2975 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2978 /* This is the structure of expand_divmod:
2980 First comes code to fix up the operands so we can perform the operations
2981 correctly and efficiently.
2983 Second comes a switch statement with code specific for each rounding mode.
2984 For some special operands this code emits all RTL for the desired
2985 operation, for other cases, it generates only a quotient and stores it in
2986 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2987 to indicate that it has not done anything.
2989 Last comes code that finishes the operation. If QUOTIENT is set and
2990 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2991 QUOTIENT is not set, it is computed using trunc rounding.
2993 We try to generate special code for division and remainder when OP1 is a
2994 constant. If |OP1| = 2**n we can use shifts and some other fast
2995 operations. For other values of OP1, we compute a carefully selected
2996 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2997 by m.
2999 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3000 half of the product. Different strategies for generating the product are
3001 implemented in expand_mult_highpart.
3003 If what we actually want is the remainder, we generate that by another
3004 by-constant multiplication and a subtraction. */
3006 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3007 code below will malfunction if we are, so check here and handle
3008 the special case if so. */
3009 if (op1 == const1_rtx)
3010 return rem_flag ? const0_rtx : op0;
3012 /* When dividing by -1, we could get an overflow.
3013 negv_optab can handle overflows. */
3014 if (! unsignedp && op1 == constm1_rtx)
3016 if (rem_flag)
3017 return const0_rtx;
3018 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3019 ? negv_optab : neg_optab, op0, target, 0);
3022 if (target
3023 /* Don't use the function value register as a target
3024 since we have to read it as well as write it,
3025 and function-inlining gets confused by this. */
3026 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3027 /* Don't clobber an operand while doing a multi-step calculation. */
3028 || ((rem_flag || op1_is_constant)
3029 && (reg_mentioned_p (target, op0)
3030 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3031 || reg_mentioned_p (target, op1)
3032 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
3033 target = 0;
3035 /* Get the mode in which to perform this computation. Normally it will
3036 be MODE, but sometimes we can't do the desired operation in MODE.
3037 If so, pick a wider mode in which we can do the operation. Convert
3038 to that mode at the start to avoid repeated conversions.
3040 First see what operations we need. These depend on the expression
3041 we are evaluating. (We assume that divxx3 insns exist under the
3042 same conditions as modxx3 insns do, and that these insns don't normally
3043 fail. If these assumptions are not correct, we may generate less
3044 efficient code in some cases.)
3046 Then see if we find a mode in which we can open-code that operation
3047 (either a division, modulus, or shift). Finally, check for the smallest
3048 mode for which we can do the operation with a library call. */
3050 /* We might want to refine this now that we have division-by-constant
3051 optimization. Since expand_mult_highpart tries so many variants, it is
3052 not straightforward to generalize this. Maybe we should make an array
3053 of possible modes in init_expmed? Save this for GCC 2.7. */
3055 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
3056 : (unsignedp ? udiv_optab : sdiv_optab));
3057 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
3059 for (compute_mode = mode; compute_mode != VOIDmode;
3060 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3061 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3062 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3063 break;
3065 if (compute_mode == VOIDmode)
3066 for (compute_mode = mode; compute_mode != VOIDmode;
3067 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3068 if (optab1->handlers[(int) compute_mode].libfunc
3069 || optab2->handlers[(int) compute_mode].libfunc)
3070 break;
3072 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3073 in expand_binop. */
3074 if (compute_mode == VOIDmode)
3075 compute_mode = mode;
3077 if (target && GET_MODE (target) == compute_mode)
3078 tquotient = target;
3079 else
3080 tquotient = gen_reg_rtx (compute_mode);
3082 size = GET_MODE_BITSIZE (compute_mode);
3083 #if 0
3084 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3085 (mode), and thereby get better code when OP1 is a constant. Do that
3086 later. It will require going over all usages of SIZE below. */
3087 size = GET_MODE_BITSIZE (mode);
3088 #endif
3090 /* Only deduct something for a REM if the last divide done was
3091 for a different constant. Then set the constant of the last
3092 divide. */
3093 max_cost = div_cost[(int) compute_mode]
3094 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3095 && INTVAL (op1) == last_div_const)
3096 ? mul_cost[(int) compute_mode] + add_cost : 0);
3098 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3100 /* Now convert to the best mode to use. */
3101 if (compute_mode != mode)
3103 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3104 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3106 /* convert_modes may have placed op1 into a register, so we
3107 must recompute the following. */
3108 op1_is_constant = GET_CODE (op1) == CONST_INT;
3109 op1_is_pow2 = (op1_is_constant
3110 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3111 || (! unsignedp
3112 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3115 /* If one of the operands is a volatile MEM, copy it into a register. */
3117 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3118 op0 = force_reg (compute_mode, op0);
3119 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3120 op1 = force_reg (compute_mode, op1);
3122 /* If we need the remainder or if OP1 is constant, we need to
3123 put OP0 in a register in case it has any queued subexpressions. */
3124 if (rem_flag || op1_is_constant)
3125 op0 = force_reg (compute_mode, op0);
3127 last = get_last_insn ();
3129 /* Promote floor rounding to trunc rounding for unsigned operations. */
3130 if (unsignedp)
3132 if (code == FLOOR_DIV_EXPR)
3133 code = TRUNC_DIV_EXPR;
3134 if (code == FLOOR_MOD_EXPR)
3135 code = TRUNC_MOD_EXPR;
3136 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3137 code = TRUNC_DIV_EXPR;
3140 if (op1 != const0_rtx)
3141 switch (code)
3143 case TRUNC_MOD_EXPR:
3144 case TRUNC_DIV_EXPR:
3145 if (op1_is_constant)
3147 if (unsignedp)
3149 unsigned HOST_WIDE_INT mh, ml;
3150 int pre_shift, post_shift;
3151 int dummy;
3152 unsigned HOST_WIDE_INT d = INTVAL (op1);
3154 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3156 pre_shift = floor_log2 (d);
3157 if (rem_flag)
3159 remainder
3160 = expand_binop (compute_mode, and_optab, op0,
3161 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3162 remainder, 1,
3163 OPTAB_LIB_WIDEN);
3164 if (remainder)
3165 return gen_lowpart (mode, remainder);
3167 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3168 build_int_2 (pre_shift, 0),
3169 tquotient, 1);
3171 else if (size <= HOST_BITS_PER_WIDE_INT)
3173 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3175 /* Most significant bit of divisor is set; emit an scc
3176 insn. */
3177 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3178 compute_mode, 1, 1);
3179 if (quotient == 0)
3180 goto fail1;
3182 else
3184 /* Find a suitable multiplier and right shift count
3185 instead of multiplying with D. */
3187 mh = choose_multiplier (d, size, size,
3188 &ml, &post_shift, &dummy);
3190 /* If the suggested multiplier is more than SIZE bits,
3191 we can do better for even divisors, using an
3192 initial right shift. */
3193 if (mh != 0 && (d & 1) == 0)
3195 pre_shift = floor_log2 (d & -d);
3196 mh = choose_multiplier (d >> pre_shift, size,
3197 size - pre_shift,
3198 &ml, &post_shift, &dummy);
3199 if (mh)
3200 abort ();
3202 else
3203 pre_shift = 0;
3205 if (mh != 0)
3207 rtx t1, t2, t3, t4;
3209 if (post_shift - 1 >= BITS_PER_WORD)
3210 goto fail1;
3212 extra_cost = (shift_cost[post_shift - 1]
3213 + shift_cost[1] + 2 * add_cost);
3214 t1 = expand_mult_highpart (compute_mode, op0, ml,
3215 NULL_RTX, 1,
3216 max_cost - extra_cost);
3217 if (t1 == 0)
3218 goto fail1;
3219 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3220 op0, t1),
3221 NULL_RTX);
3222 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3223 build_int_2 (1, 0), NULL_RTX,1);
3224 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3225 t1, t3),
3226 NULL_RTX);
3227 quotient
3228 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3229 build_int_2 (post_shift - 1, 0),
3230 tquotient, 1);
3232 else
3234 rtx t1, t2;
3236 if (pre_shift >= BITS_PER_WORD
3237 || post_shift >= BITS_PER_WORD)
3238 goto fail1;
3240 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3241 build_int_2 (pre_shift, 0),
3242 NULL_RTX, 1);
3243 extra_cost = (shift_cost[pre_shift]
3244 + shift_cost[post_shift]);
3245 t2 = expand_mult_highpart (compute_mode, t1, ml,
3246 NULL_RTX, 1,
3247 max_cost - extra_cost);
3248 if (t2 == 0)
3249 goto fail1;
3250 quotient
3251 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3252 build_int_2 (post_shift, 0),
3253 tquotient, 1);
3257 else /* Mode too wide to use tricky code */
3258 break;
3260 insn = get_last_insn ();
3261 if (insn != last
3262 && (set = single_set (insn)) != 0
3263 && SET_DEST (set) == quotient)
3264 set_unique_reg_note (insn,
3265 REG_EQUAL,
3266 gen_rtx_UDIV (compute_mode, op0, op1));
3268 else /* TRUNC_DIV, signed */
3270 unsigned HOST_WIDE_INT ml;
3271 int lgup, post_shift;
3272 HOST_WIDE_INT d = INTVAL (op1);
3273 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3275 /* n rem d = n rem -d */
3276 if (rem_flag && d < 0)
3278 d = abs_d;
3279 op1 = GEN_INT (trunc_int_for_mode (abs_d, compute_mode));
3282 if (d == 1)
3283 quotient = op0;
3284 else if (d == -1)
3285 quotient = expand_unop (compute_mode, neg_optab, op0,
3286 tquotient, 0);
3287 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3289 /* This case is not handled correctly below. */
3290 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3291 compute_mode, 1, 1);
3292 if (quotient == 0)
3293 goto fail1;
3295 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3296 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
3298 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3300 lgup = floor_log2 (abs_d);
3301 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3303 rtx label = gen_label_rtx ();
3304 rtx t1;
3306 t1 = copy_to_mode_reg (compute_mode, op0);
3307 do_cmp_and_jump (t1, const0_rtx, GE,
3308 compute_mode, label);
3309 expand_inc (t1, GEN_INT (trunc_int_for_mode
3310 (abs_d - 1, compute_mode)));
3311 emit_label (label);
3312 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3313 build_int_2 (lgup, 0),
3314 tquotient, 0);
3316 else
3318 rtx t1, t2, t3;
3319 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3320 build_int_2 (size - 1, 0),
3321 NULL_RTX, 0);
3322 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3323 build_int_2 (size - lgup, 0),
3324 NULL_RTX, 1);
3325 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3326 op0, t2),
3327 NULL_RTX);
3328 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3329 build_int_2 (lgup, 0),
3330 tquotient, 0);
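/* Illustrative instance of the branch-free sequence just above, for
   SIZE == 32 and abs_d == 8 (lgup == 3):
     t1 = op0 >> 31;            arithmetic shift: 0 or -1
     t2 = (unsigned) t1 >> 29;  logical shift: 0 or 7
     t3 = op0 + t2;
     quotient = t3 >> 3;        arithmetic shift
   Biasing a negative operand by abs_d - 1 before the final shift makes
   the division round toward zero instead of toward -infinity. */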
3333 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3334 the quotient. */
3335 if (d < 0)
3337 insn = get_last_insn ();
3338 if (insn != last
3339 && (set = single_set (insn)) != 0
3340 && SET_DEST (set) == quotient
3341 && abs_d < ((unsigned HOST_WIDE_INT) 1
3342 << (HOST_BITS_PER_WIDE_INT - 1)))
3343 set_unique_reg_note (insn,
3344 REG_EQUAL,
3345 gen_rtx_DIV (compute_mode,
3346 op0,
3347 GEN_INT
3348 (trunc_int_for_mode
3349 (abs_d,
3350 compute_mode))));
3352 quotient = expand_unop (compute_mode, neg_optab,
3353 quotient, quotient, 0);
3356 else if (size <= HOST_BITS_PER_WIDE_INT)
3358 choose_multiplier (abs_d, size, size - 1,
3359 &ml, &post_shift, &lgup);
3360 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3362 rtx t1, t2, t3;
3364 if (post_shift >= BITS_PER_WORD
3365 || size - 1 >= BITS_PER_WORD)
3366 goto fail1;
3368 extra_cost = (shift_cost[post_shift]
3369 + shift_cost[size - 1] + add_cost);
3370 t1 = expand_mult_highpart (compute_mode, op0, ml,
3371 NULL_RTX, 0,
3372 max_cost - extra_cost);
3373 if (t1 == 0)
3374 goto fail1;
3375 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3376 build_int_2 (post_shift, 0), NULL_RTX, 0);
3377 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3378 build_int_2 (size - 1, 0), NULL_RTX, 0);
3379 if (d < 0)
3380 quotient
3381 = force_operand (gen_rtx_MINUS (compute_mode,
3382 t3, t2),
3383 tquotient);
3384 else
3385 quotient
3386 = force_operand (gen_rtx_MINUS (compute_mode,
3387 t2, t3),
3388 tquotient);
3390 else
3392 rtx t1, t2, t3, t4;
3394 if (post_shift >= BITS_PER_WORD
3395 || size - 1 >= BITS_PER_WORD)
3396 goto fail1;
3398 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3399 extra_cost = (shift_cost[post_shift]
3400 + shift_cost[size - 1] + 2 * add_cost);
3401 t1 = expand_mult_highpart (compute_mode, op0, ml,
3402 NULL_RTX, 0,
3403 max_cost - extra_cost);
3404 if (t1 == 0)
3405 goto fail1;
3406 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3407 t1, op0),
3408 NULL_RTX);
3409 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3410 build_int_2 (post_shift, 0),
3411 NULL_RTX, 0);
3412 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3413 build_int_2 (size - 1, 0),
3414 NULL_RTX, 0);
3415 if (d < 0)
3416 quotient
3417 = force_operand (gen_rtx_MINUS (compute_mode,
3418 t4, t3),
3419 tquotient);
3420 else
3421 quotient
3422 = force_operand (gen_rtx_MINUS (compute_mode,
3423 t3, t4),
3424 tquotient);
3427 else /* Mode too wide to use tricky code */
3428 break;
3430 insn = get_last_insn ();
3431 if (insn != last
3432 && (set = single_set (insn)) != 0
3433 && SET_DEST (set) == quotient)
3434 set_unique_reg_note (insn,
3435 REG_EQUAL,
3436 gen_rtx_DIV (compute_mode, op0, op1));
3438 break;
3440 fail1:
3441 delete_insns_since (last);
3442 break;
3444 case FLOOR_DIV_EXPR:
3445 case FLOOR_MOD_EXPR:
3446 /* We will come here only for signed operations. */
3447 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3449 unsigned HOST_WIDE_INT mh, ml;
3450 int pre_shift, lgup, post_shift;
3451 HOST_WIDE_INT d = INTVAL (op1);
3453 if (d > 0)
3455 /* We could just as easily deal with negative constants here,
3456 but it does not seem worth the trouble for GCC 2.6. */
3457 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3459 pre_shift = floor_log2 (d);
3460 if (rem_flag)
3462 remainder = expand_binop (compute_mode, and_optab, op0,
3463 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3464 remainder, 0, OPTAB_LIB_WIDEN);
3465 if (remainder)
3466 return gen_lowpart (mode, remainder);
3468 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3469 build_int_2 (pre_shift, 0),
3470 tquotient, 0);
3472 else
3474 rtx t1, t2, t3, t4;
3476 mh = choose_multiplier (d, size, size - 1,
3477 &ml, &post_shift, &lgup);
3478 if (mh)
3479 abort ();
3481 if (post_shift < BITS_PER_WORD
3482 && size - 1 < BITS_PER_WORD)
3484 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3485 build_int_2 (size - 1, 0),
3486 NULL_RTX, 0);
3487 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3488 NULL_RTX, 0, OPTAB_WIDEN);
3489 extra_cost = (shift_cost[post_shift]
3490 + shift_cost[size - 1] + 2 * add_cost);
3491 t3 = expand_mult_highpart (compute_mode, t2, ml,
3492 NULL_RTX, 1,
3493 max_cost - extra_cost);
3494 if (t3 != 0)
3496 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3497 build_int_2 (post_shift, 0),
3498 NULL_RTX, 1);
3499 quotient = expand_binop (compute_mode, xor_optab,
3500 t4, t1, tquotient, 0,
3501 OPTAB_WIDEN);
3506 else
3508 rtx nsign, t1, t2, t3, t4;
3509 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3510 op0, constm1_rtx), NULL_RTX);
3511 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3512 0, OPTAB_WIDEN);
3513 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3514 build_int_2 (size - 1, 0), NULL_RTX, 0);
3515 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3516 NULL_RTX);
3517 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3518 NULL_RTX, 0);
3519 if (t4)
3521 rtx t5;
3522 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3523 NULL_RTX, 0);
3524 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3525 t4, t5),
3526 tquotient);
3531 if (quotient != 0)
3532 break;
3533 delete_insns_since (last);
3535 /* Try using an instruction that produces both the quotient and
3536 remainder, using truncation. We can easily compensate the quotient
3537 or remainder to get floor rounding, once we have the remainder.
3538 Notice that we also compute the final remainder value here,
3539 and return the result right away. */
3540 if (target == 0 || GET_MODE (target) != compute_mode)
3541 target = gen_reg_rtx (compute_mode);
3543 if (rem_flag)
3545 remainder
3546 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3547 quotient = gen_reg_rtx (compute_mode);
3549 else
3551 quotient
3552 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3553 remainder = gen_reg_rtx (compute_mode);
3556 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3557 quotient, remainder, 0))
3559 /* This could be computed with a branch-less sequence.
3560 Save that for later. */
3561 rtx tem;
3562 rtx label = gen_label_rtx ();
3563 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3564 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3565 NULL_RTX, 0, OPTAB_WIDEN);
3566 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3567 expand_dec (quotient, const1_rtx);
3568 expand_inc (remainder, op1);
3569 emit_label (label);
3570 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3573 /* No luck with division elimination or divmod. Have to do it
3574 by conditionally adjusting op0 *and* the result. */
3576 rtx label1, label2, label3, label4, label5;
3577 rtx adjusted_op0;
3578 rtx tem;
3580 quotient = gen_reg_rtx (compute_mode);
3581 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3582 label1 = gen_label_rtx ();
3583 label2 = gen_label_rtx ();
3584 label3 = gen_label_rtx ();
3585 label4 = gen_label_rtx ();
3586 label5 = gen_label_rtx ();
3587 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3588 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3589 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3590 quotient, 0, OPTAB_LIB_WIDEN);
3591 if (tem != quotient)
3592 emit_move_insn (quotient, tem);
3593 emit_jump_insn (gen_jump (label5));
3594 emit_barrier ();
3595 emit_label (label1);
3596 expand_inc (adjusted_op0, const1_rtx);
3597 emit_jump_insn (gen_jump (label4));
3598 emit_barrier ();
3599 emit_label (label2);
3600 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3601 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3602 quotient, 0, OPTAB_LIB_WIDEN);
3603 if (tem != quotient)
3604 emit_move_insn (quotient, tem);
3605 emit_jump_insn (gen_jump (label5));
3606 emit_barrier ();
3607 emit_label (label3);
3608 expand_dec (adjusted_op0, const1_rtx);
3609 emit_label (label4);
3610 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3611 quotient, 0, OPTAB_LIB_WIDEN);
3612 if (tem != quotient)
3613 emit_move_insn (quotient, tem);
3614 expand_dec (quotient, const1_rtx);
3615 emit_label (label5);
3617 break;
3619 case CEIL_DIV_EXPR:
3620 case CEIL_MOD_EXPR:
3621 if (unsignedp)
3623 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3625 rtx t1, t2, t3;
3626 unsigned HOST_WIDE_INT d = INTVAL (op1);
3627 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3628 build_int_2 (floor_log2 (d), 0),
3629 tquotient, 1);
3630 t2 = expand_binop (compute_mode, and_optab, op0,
3631 GEN_INT (d - 1),
3632 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3633 t3 = gen_reg_rtx (compute_mode);
3634 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3635 compute_mode, 1, 1);
3636 if (t3 == 0)
3638 rtx lab;
3639 lab = gen_label_rtx ();
3640 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3641 expand_inc (t1, const1_rtx);
3642 emit_label (lab);
3643 quotient = t1;
3645 else
3646 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3647 t1, t3),
3648 tquotient);
3649 break;
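	    /* For example, with op0 = 13 and d = 4 the sequence above
	       computes t1 = 13 >> 2 = 3; the low bits 13 & 3 = 1 are
	       non-zero, so t3 = 1 and the quotient is 3 + 1 = 4
	       = ceil (13 / 4).  For op0 = 12 the low bits are zero and
	       the quotient stays 12 >> 2 = 3.  */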
3652 /* Try using an instruction that produces both the quotient and
3653 remainder, using truncation. We can easily compensate the
3654 quotient or remainder to get ceiling rounding, once we have the
3655 remainder. Notice that we also compute the final remainder
3656 value here, and return the result right away. */
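	  /* E.g. 13 / 4: the truncating divmod gives q = 3, r = 1;
	     since r != 0 the fixup below yields q = 4, r = 1 - 4 = -3,
	     and indeed 13 = 4 * 4 - 3, the ceiling form of the
	     identity.  */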
3657 if (target == 0 || GET_MODE (target) != compute_mode)
3658 target = gen_reg_rtx (compute_mode);
3660 if (rem_flag)
3662 remainder = (GET_CODE (target) == REG
3663 ? target : gen_reg_rtx (compute_mode));
3664 quotient = gen_reg_rtx (compute_mode);
3666 else
3668 quotient = (GET_CODE (target) == REG
3669 ? target : gen_reg_rtx (compute_mode));
3670 remainder = gen_reg_rtx (compute_mode);
3673 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3674 remainder, 1))
3676 /* This could be computed with a branch-less sequence.
3677 Save that for later. */
3678 rtx label = gen_label_rtx ();
3679 do_cmp_and_jump (remainder, const0_rtx, EQ,
3680 compute_mode, label);
3681 expand_inc (quotient, const1_rtx);
3682 expand_dec (remainder, op1);
3683 emit_label (label);
3684 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3687 /* No luck with division elimination or divmod. Have to do it
3688 by conditionally adjusting op0 *and* the result. */
3690 rtx label1, label2;
3691 rtx adjusted_op0, tem;
3693 quotient = gen_reg_rtx (compute_mode);
3694 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3695 label1 = gen_label_rtx ();
3696 label2 = gen_label_rtx ();
3697 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3698 compute_mode, label1);
3699 emit_move_insn (quotient, const0_rtx);
3700 emit_jump_insn (gen_jump (label2));
3701 emit_barrier ();
3702 emit_label (label1);
3703 expand_dec (adjusted_op0, const1_rtx);
3704 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3705 quotient, 1, OPTAB_LIB_WIDEN);
3706 if (tem != quotient)
3707 emit_move_insn (quotient, tem);
3708 expand_inc (quotient, const1_rtx);
3709 emit_label (label2);
3712 else /* signed */
3714 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3715 && INTVAL (op1) >= 0)
3717 /* This is extremely similar to the code for the unsigned case
3718 above. For 2.7 we should merge these variants, but for
3719 2.6.1 I don't want to touch the code for unsigned since that
3720 gets used in C. The signed case will only be used by other
3721 languages (Ada). */
3723 rtx t1, t2, t3;
3724 unsigned HOST_WIDE_INT d = INTVAL (op1);
3725 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3726 build_int_2 (floor_log2 (d), 0),
3727 tquotient, 0);
3728 t2 = expand_binop (compute_mode, and_optab, op0,
3729 GEN_INT (d - 1),
3730 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3731 t3 = gen_reg_rtx (compute_mode);
3732 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3733 compute_mode, 1, 1);
3734 if (t3 == 0)
3736 rtx lab;
3737 lab = gen_label_rtx ();
3738 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3739 expand_inc (t1, const1_rtx);
3740 emit_label (lab);
3741 quotient = t1;
3743 else
3744 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3745 t1, t3),
3746 tquotient);
3747 break;
3750 /* Try using an instruction that produces both the quotient and
3751 remainder, using truncation. We can easily compensate the
3752 quotient or remainder to get ceiling rounding, once we have the
3753 remainder. Notice that we also compute the final remainder
3754 value here, and return the result right away. */
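	  /* The signed fixup below additionally tests op0 ^ op1: when
	     the operands differ in sign the truncating quotient already
	     equals the ceiling (e.g. -13 / 4 truncates to -3
	     = ceil (-3.25)), so only same-sign operands with a non-zero
	     remainder are adjusted, e.g. -13 / -4: q = 3, r = -1 becomes
	     q = 4, r = 3.  */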
3755 if (target == 0 || GET_MODE (target) != compute_mode)
3756 target = gen_reg_rtx (compute_mode);
3757 if (rem_flag)
3759 remainder = (GET_CODE (target) == REG
3760 ? target : gen_reg_rtx (compute_mode));
3761 quotient = gen_reg_rtx (compute_mode);
3763 else
3765 quotient = (GET_CODE (target) == REG
3766 ? target : gen_reg_rtx (compute_mode));
3767 remainder = gen_reg_rtx (compute_mode);
3770 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3771 remainder, 0))
3773 /* This could be computed with a branch-less sequence.
3774 Save that for later. */
3775 rtx tem;
3776 rtx label = gen_label_rtx ();
3777 do_cmp_and_jump (remainder, const0_rtx, EQ,
3778 compute_mode, label);
3779 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3780 NULL_RTX, 0, OPTAB_WIDEN);
3781 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3782 expand_inc (quotient, const1_rtx);
3783 expand_dec (remainder, op1);
3784 emit_label (label);
3785 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3788 /* No luck with division elimination or divmod. Have to do it
3789 by conditionally adjusting op0 *and* the result. */
3791 rtx label1, label2, label3, label4, label5;
3792 rtx adjusted_op0;
3793 rtx tem;
3795 quotient = gen_reg_rtx (compute_mode);
3796 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3797 label1 = gen_label_rtx ();
3798 label2 = gen_label_rtx ();
3799 label3 = gen_label_rtx ();
3800 label4 = gen_label_rtx ();
3801 label5 = gen_label_rtx ();
3802 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3803 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3804 compute_mode, label1);
3805 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3806 quotient, 0, OPTAB_LIB_WIDEN);
3807 if (tem != quotient)
3808 emit_move_insn (quotient, tem);
3809 emit_jump_insn (gen_jump (label5));
3810 emit_barrier ();
3811 emit_label (label1);
3812 expand_dec (adjusted_op0, const1_rtx);
3813 emit_jump_insn (gen_jump (label4));
3814 emit_barrier ();
3815 emit_label (label2);
3816 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3817 compute_mode, label3);
3818 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3819 quotient, 0, OPTAB_LIB_WIDEN);
3820 if (tem != quotient)
3821 emit_move_insn (quotient, tem);
3822 emit_jump_insn (gen_jump (label5));
3823 emit_barrier ();
3824 emit_label (label3);
3825 expand_inc (adjusted_op0, const1_rtx);
3826 emit_label (label4);
3827 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3828 quotient, 0, OPTAB_LIB_WIDEN);
3829 if (tem != quotient)
3830 emit_move_insn (quotient, tem);
3831 expand_inc (quotient, const1_rtx);
3832 emit_label (label5);
3835 break;
3837 case EXACT_DIV_EXPR:
3838 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3840 HOST_WIDE_INT d = INTVAL (op1);
3841 unsigned HOST_WIDE_INT ml;
3842 int pre_shift;
3843 rtx t1;
3845 pre_shift = floor_log2 (d & -d);
3846 ml = invert_mod2n (d >> pre_shift, size);
3847 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3848 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
3849 quotient = expand_mult (compute_mode, t1,
3850 GEN_INT (trunc_int_for_mode
3851 (ml, compute_mode)),
3852 NULL_RTX, 0);
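	  /* Exact division needs no fixup at all: an odd divisor d has
	     a multiplicative inverse ml modulo 2**size, and multiplying
	     by ml recovers the exact quotient.  E.g. in 32 bits the
	     inverse of 3 is 0xAAAAAAAB (3 * 0xAAAAAAAB == 1 mod 2**32),
	     so 21 * 0xAAAAAAAB mod 2**32 = 7.  An even divisor such as
	     12 is first reduced by pre_shift = 2 and then handled as a
	     division by 3.  */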
3854 insn = get_last_insn ();
3855 set_unique_reg_note (insn,
3856 REG_EQUAL,
3857 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3858 compute_mode,
3859 op0, op1));
3861 break;
3863 case ROUND_DIV_EXPR:
3864 case ROUND_MOD_EXPR:
3865 if (unsignedp)
3867 rtx tem;
3868 rtx label;
3869 label = gen_label_rtx ();
3870 quotient = gen_reg_rtx (compute_mode);
3871 remainder = gen_reg_rtx (compute_mode);
3872 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3874 rtx tem;
3875 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3876 quotient, 1, OPTAB_LIB_WIDEN);
3877 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3878 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3879 remainder, 1, OPTAB_LIB_WIDEN);
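	  /* Round to nearest: bump the quotient when the remainder
	     exceeds (d - 1) / 2, computed below.  E.g. d = 3 gives the
	     threshold 1; 7 / 3 has r = 1 <= 1 and stays q = 2, while
	     8 / 3 has r = 2 > 1 and becomes q = 3, r = -1
	     (8 = 3 * 3 - 1).  */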
3881 tem = plus_constant (op1, -1);
3882 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3883 build_int_2 (1, 0), NULL_RTX, 1);
3884 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3885 expand_inc (quotient, const1_rtx);
3886 expand_dec (remainder, op1);
3887 emit_label (label);
3889 else
3891 rtx abs_rem, abs_op1, tem, mask;
3892 rtx label;
3893 label = gen_label_rtx ();
3894 quotient = gen_reg_rtx (compute_mode);
3895 remainder = gen_reg_rtx (compute_mode);
3896 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3898 rtx tem;
3899 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3900 quotient, 0, OPTAB_LIB_WIDEN);
3901 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3902 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3903 remainder, 0, OPTAB_LIB_WIDEN);
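	  /* Branch-free signed rounding: skip the fixup when
	     2 * |r| < |d|.  Otherwise mask = (op0 ^ op1) >> (size - 1)
	     is 0 for same-sign operands and -1 for differing signs, so
	     (mask ^ 1) - mask is +1 or -1 and (mask ^ op1) - mask is
	     op1 or -op1.  E.g. -7 / 2: q = -3, r = -1; 2 * |r| = |d|
	     and mask = -1, so q becomes -4 and r becomes 1, i.e. -3.5
	     rounds away from zero to -4.  */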
3905 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
3906 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
3907 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3908 build_int_2 (1, 0), NULL_RTX, 1);
3909 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3910 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3911 NULL_RTX, 0, OPTAB_WIDEN);
3912 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3913 build_int_2 (size - 1, 0), NULL_RTX, 0);
3914 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3915 NULL_RTX, 0, OPTAB_WIDEN);
3916 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3917 NULL_RTX, 0, OPTAB_WIDEN);
3918 expand_inc (quotient, tem);
3919 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3920 NULL_RTX, 0, OPTAB_WIDEN);
3921 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3922 NULL_RTX, 0, OPTAB_WIDEN);
3923 expand_dec (remainder, tem);
3924 emit_label (label);
3926 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3928 default:
3929 abort ();
3932 if (quotient == 0)
3934 if (target && GET_MODE (target) != compute_mode)
3935 target = 0;
3937 if (rem_flag)
3939 /* Try to produce the remainder without producing the quotient.
3940 If we seem to have a divmod pattern that does not require widening,
3941 don't try widening here. We should really have a WIDEN argument
3942 to expand_twoval_binop, since what we'd really like to do here is
3943 1) try a mod insn in compute_mode
3944 2) try a divmod insn in compute_mode
3945 3) try a div insn in compute_mode and multiply-subtract to get
3946 remainder
3947 4) try the same things with widening allowed. */
3948 remainder
3949 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3950 op0, op1, target,
3951 unsignedp,
3952 ((optab2->handlers[(int) compute_mode].insn_code
3953 != CODE_FOR_nothing)
3954 ? OPTAB_DIRECT : OPTAB_WIDEN));
3955 if (remainder == 0)
3957 /* No luck there. Can we do remainder and divide at once
3958 without a library call? */
3959 remainder = gen_reg_rtx (compute_mode);
3960 if (! expand_twoval_binop ((unsignedp
3961 ? udivmod_optab
3962 : sdivmod_optab),
3963 op0, op1,
3964 NULL_RTX, remainder, unsignedp))
3965 remainder = 0;
3968 if (remainder)
3969 return gen_lowpart (mode, remainder);
3972 /* Produce the quotient. Try a quotient insn, but not a library call.
3973 If we have a divmod in this mode, use it in preference to widening
3974 the div (for this test we assume it will not fail). Note that optab2
3975 is set to one of the two optabs that the call below will use. */
3976 quotient
3977 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3978 op0, op1, rem_flag ? NULL_RTX : target,
3979 unsignedp,
3980 ((optab2->handlers[(int) compute_mode].insn_code
3981 != CODE_FOR_nothing)
3982 ? OPTAB_DIRECT : OPTAB_WIDEN));
3984 if (quotient == 0)
3986 /* No luck there. Try a quotient-and-remainder insn,
3987 keeping the quotient alone. */
3988 quotient = gen_reg_rtx (compute_mode);
3989 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3990 op0, op1,
3991 quotient, NULL_RTX, unsignedp))
3993 quotient = 0;
3994 if (! rem_flag)
3995 /* Still no luck. If we are not computing the remainder,
3996 use a library call for the quotient. */
3997 quotient = sign_expand_binop (compute_mode,
3998 udiv_optab, sdiv_optab,
3999 op0, op1, target,
4000 unsignedp, OPTAB_LIB_WIDEN);
4005 if (rem_flag)
4007 if (target && GET_MODE (target) != compute_mode)
4008 target = 0;
4010 if (quotient == 0)
4011 /* No divide instruction either. Use library for remainder. */
4012 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4013 op0, op1, target,
4014 unsignedp, OPTAB_LIB_WIDEN);
4015 else
4017 /* We divided. Now finish doing X - Y * (X / Y). */
4018 remainder = expand_mult (compute_mode, quotient, op1,
4019 NULL_RTX, unsignedp);
4020 remainder = expand_binop (compute_mode, sub_optab, op0,
4021 remainder, target, unsignedp,
4022 OPTAB_LIB_WIDEN);
4026 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4029 /* Return a tree node with data type TYPE, describing the value of X.
4030 Usually this is an RTL_EXPR, if there is no obvious better choice.
4031 X may be an expression; however, we only support those expressions
4032 generated by loop.c. */
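/* For instance, given X = (plus:SI (reg:SI 100) (const_int 4)) and an
   `int' TYPE, the PLUS arm recurses: the REG falls through to the
   default case and is wrapped in an RTL_EXPR, the CONST_INT becomes
   the integer constant 4, and the result is the folded PLUS_EXPR of
   the two.  */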
4034 tree
4035 make_tree (type, x)
4036 tree type;
4037 rtx x;
4039 tree t;
4041 switch (GET_CODE (x))
4043 case CONST_INT:
4044 t = build_int_2 (INTVAL (x),
4045 (TREE_UNSIGNED (type)
4046 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
4047 || INTVAL (x) >= 0 ? 0 : -1);
4048 TREE_TYPE (t) = type;
4049 return t;
4051 case CONST_DOUBLE:
4052 if (GET_MODE (x) == VOIDmode)
4054 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4055 TREE_TYPE (t) = type;
4057 else
4059 REAL_VALUE_TYPE d;
4061 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4062 t = build_real (type, d);
4065 return t;
4067 case PLUS:
4068 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4069 make_tree (type, XEXP (x, 1))));
4071 case MINUS:
4072 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4073 make_tree (type, XEXP (x, 1))));
4075 case NEG:
4076 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4078 case MULT:
4079 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4080 make_tree (type, XEXP (x, 1))));
4082 case ASHIFT:
4083 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4084 make_tree (type, XEXP (x, 1))));
4086 case LSHIFTRT:
4087 return fold (convert (type,
4088 build (RSHIFT_EXPR, unsigned_type (type),
4089 make_tree (unsigned_type (type),
4090 XEXP (x, 0)),
4091 make_tree (type, XEXP (x, 1)))));
4093 case ASHIFTRT:
4094 return fold (convert (type,
4095 build (RSHIFT_EXPR, signed_type (type),
4096 make_tree (signed_type (type), XEXP (x, 0)),
4097 make_tree (type, XEXP (x, 1)))));
4099 case DIV:
4100 if (TREE_CODE (type) != REAL_TYPE)
4101 t = signed_type (type);
4102 else
4103 t = type;
4105 return fold (convert (type,
4106 build (TRUNC_DIV_EXPR, t,
4107 make_tree (t, XEXP (x, 0)),
4108 make_tree (t, XEXP (x, 1)))));
4109 case UDIV:
4110 t = unsigned_type (type);
4111 return fold (convert (type,
4112 build (TRUNC_DIV_EXPR, t,
4113 make_tree (t, XEXP (x, 0)),
4114 make_tree (t, XEXP (x, 1)))));
4115 default:
4116 t = make_node (RTL_EXPR);
4117 TREE_TYPE (t) = type;
4119 #ifdef POINTERS_EXTEND_UNSIGNED
4120 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4121 ptr_mode. So convert. */
4122 if (POINTER_TYPE_P (type) && GET_MODE (x) != TYPE_MODE (type))
4123 x = convert_memory_address (TYPE_MODE (type), x);
4124 #endif
4126 RTL_EXPR_RTL (t) = x;
4127 /* There are no insns to be output
4128 when this rtl_expr is used. */
4129 RTL_EXPR_SEQUENCE (t) = 0;
4130 return t;
4134 /* Return an rtx representing the value of X * MULT + ADD.
4135 TARGET is a suggestion for where to store the result (an rtx).
4136 MODE is the machine mode for the computation.
4137 X and MULT must have mode MODE. ADD may have a different mode.
4138 So can X (defaults to same as MODE).
4139 UNSIGNEDP is non-zero to do unsigned multiplication.
4140 This may emit insns. */
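/* A sketch of the mechanism: with MULT = GEN_INT (3) and ADD some
   pseudo Y, this builds the tree (X * 3) + Y via make_tree, folds it,
   and expands the folded tree, so any algebraic simplification done
   by fold comes for free.  */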
4142 rtx
4143 expand_mult_add (x, target, mult, add, mode, unsignedp)
4144 rtx x, target, mult, add;
4145 enum machine_mode mode;
4146 int unsignedp;
4148 tree type = type_for_mode (mode, unsignedp);
4149 tree add_type = (GET_MODE (add) == VOIDmode
4150 ? type : type_for_mode (GET_MODE (add), unsignedp));
4151 tree result = fold (build (PLUS_EXPR, type,
4152 fold (build (MULT_EXPR, type,
4153 make_tree (type, x),
4154 make_tree (type, mult))),
4155 make_tree (add_type, add)));
4157 return expand_expr (result, target, VOIDmode, 0);
4160 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4161 and returning TARGET.
4163 If TARGET is 0, a pseudo-register or constant is returned. */
4165 rtx
4166 expand_and (op0, op1, target)
4167 rtx op0, op1, target;
4169 enum machine_mode mode = VOIDmode;
4170 rtx tem;
4172 if (GET_MODE (op0) != VOIDmode)
4173 mode = GET_MODE (op0);
4174 else if (GET_MODE (op1) != VOIDmode)
4175 mode = GET_MODE (op1);
4177 if (mode != VOIDmode)
4178 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4179 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
4180 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
4181 else
4182 abort ();
4184 if (target == 0)
4185 target = tem;
4186 else if (tem != target)
4187 emit_move_insn (target, tem);
4188 return target;
4191 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4192 and store the result in TARGET. Normally return TARGET.
4193 Return 0 if that cannot be done.
4195 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4196 it is VOIDmode, they cannot both be CONST_INT.
4198 UNSIGNEDP is for the case where we have to widen the operands
4199 to perform the operation. It says to use zero-extension.
4201 NORMALIZEP is 1 if we should convert the result to be either zero
4202 or one. NORMALIZEP is -1 if we should convert the result to be
4203 either zero or -1. If NORMALIZEP is zero, the result will be left
4204 "raw" out of the scc insn. */
4206 rtx
4207 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4208 rtx target;
4209 enum rtx_code code;
4210 rtx op0, op1;
4211 enum machine_mode mode;
4212 int unsignedp;
4213 int normalizep;
4215 rtx subtarget;
4216 enum insn_code icode;
4217 enum machine_mode compare_mode;
4218 enum machine_mode target_mode = GET_MODE (target);
4219 rtx tem;
4220 rtx last = get_last_insn ();
4221 rtx pattern, comparison;
4223 if (unsignedp)
4224 code = unsigned_condition (code);
4226 /* If one operand is constant, make it the second one. Only do this
4227 if the other operand is not constant as well. */
4229 if (swap_commutative_operands_p (op0, op1))
4231 tem = op0;
4232 op0 = op1;
4233 op1 = tem;
4234 code = swap_condition (code);
4237 if (mode == VOIDmode)
4238 mode = GET_MODE (op0);
4240 /* For some comparisons with 1 and -1, we can convert this to
4241 comparisons with zero. This will often produce more opportunities for
4242 store-flag insns. */
4244 switch (code)
4246 case LT:
4247 if (op1 == const1_rtx)
4248 op1 = const0_rtx, code = LE;
4249 break;
4250 case LE:
4251 if (op1 == constm1_rtx)
4252 op1 = const0_rtx, code = LT;
4253 break;
4254 case GE:
4255 if (op1 == const1_rtx)
4256 op1 = const0_rtx, code = GT;
4257 break;
4258 case GT:
4259 if (op1 == constm1_rtx)
4260 op1 = const0_rtx, code = GE;
4261 break;
4262 case GEU:
4263 if (op1 == const1_rtx)
4264 op1 = const0_rtx, code = NE;
4265 break;
4266 case LTU:
4267 if (op1 == const1_rtx)
4268 op1 = const0_rtx, code = EQ;
4269 break;
4270 default:
4271 break;
4274 /* If we are comparing a double-word integer with zero, we can convert
4275 the comparison into one involving a single word. */
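     E.g. a DImode comparison with zero on a 32-bit target: for EQ/NE,
     IOR the two SImode halves and test the single-word result; for
     LT/GE only the sign bit matters, and it lives in the high word.  */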
4276 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4277 && GET_MODE_CLASS (mode) == MODE_INT
4278 && op1 == const0_rtx)
4280 if (code == EQ || code == NE)
4282 /* Do a logical OR of the two words and compare the result. */
4283 rtx op0h = gen_highpart (word_mode, op0);
4284 rtx op0l = gen_lowpart (word_mode, op0);
4285 rtx op0both = expand_binop (word_mode, ior_optab, op0h, op0l,
4286 NULL_RTX, unsignedp, OPTAB_DIRECT);
4287 if (op0both != 0)
4288 return emit_store_flag (target, code, op0both, op1, word_mode,
4289 unsignedp, normalizep);
4291 else if (code == LT || code == GE)
4292 /* If testing the sign bit, can just test on high word. */
4293 return emit_store_flag (target, code, gen_highpart (word_mode, op0),
4294 op1, word_mode, unsignedp, normalizep);
4297 /* From now on, we won't change CODE, so set ICODE now. */
4298 icode = setcc_gen_code[(int) code];
4300 /* If this is A < 0 or A >= 0, we can do this by taking the ones
4301 complement of A (for GE) and shifting the sign bit to the low bit. */
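  /* E.g. in an 8-bit mode with STORE_FLAG_VALUE == 1: A < 0 is just
     the logical shift (unsigned) A >> 7 (for A = -5, 0xfb >> 7 = 1),
     and A >= 0 is (~A) >> 7 (for A = 3, 0xfc >> 7 = 1).  With
     NORMALIZEP == -1 an arithmetic shift yields 0 / -1 instead.  */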
4302 if (op1 == const0_rtx && (code == LT || code == GE)
4303 && GET_MODE_CLASS (mode) == MODE_INT
4304 && (normalizep || STORE_FLAG_VALUE == 1
4305 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4306 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4307 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4309 subtarget = target;
4311 /* If the result is to be wider than OP0, it is best to convert it
4312 first. If it is to be narrower, it is *incorrect* to convert it
4313 first. */
4314 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4316 op0 = protect_from_queue (op0, 0);
4317 op0 = convert_modes (target_mode, mode, op0, 0);
4318 mode = target_mode;
4321 if (target_mode != mode)
4322 subtarget = 0;
4324 if (code == GE)
4325 op0 = expand_unop (mode, one_cmpl_optab, op0,
4326 ((STORE_FLAG_VALUE == 1 || normalizep)
4327 ? 0 : subtarget), 0);
4329 if (STORE_FLAG_VALUE == 1 || normalizep)
4330 /* If we are supposed to produce a 0/1 value, we want to do
4331 a logical shift from the sign bit to the low-order bit; for
4332 a -1/0 value, we do an arithmetic shift. */
4333 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4334 size_int (GET_MODE_BITSIZE (mode) - 1),
4335 subtarget, normalizep != -1);
4337 if (mode != target_mode)
4338 op0 = convert_modes (target_mode, mode, op0, 0);
4340 return op0;
4343 if (icode != CODE_FOR_nothing)
4345 insn_operand_predicate_fn pred;
4347 /* We think we may be able to do this with a scc insn. Emit the
4348 comparison and then the scc insn.
4350 compare_from_rtx may call emit_queue, which would be deleted below
4351 if the scc insn fails. So call it ourselves before setting LAST.
4352 Likewise for do_pending_stack_adjust. */
4354 emit_queue ();
4355 do_pending_stack_adjust ();
4356 last = get_last_insn ();
4358 comparison
4359 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4360 if (GET_CODE (comparison) == CONST_INT)
4361 return (comparison == const0_rtx ? const0_rtx
4362 : normalizep == 1 ? const1_rtx
4363 : normalizep == -1 ? constm1_rtx
4364 : const_true_rtx);
4366 /* If the code of COMPARISON doesn't match CODE, something is
4367 wrong; we can no longer be sure that we have the operation.
4368 We could handle this case, but it should not happen. */
4370 if (GET_CODE (comparison) != code)
4371 abort ();
4373 /* Get a reference to the target in the proper mode for this insn. */
4374 compare_mode = insn_data[(int) icode].operand[0].mode;
4375 subtarget = target;
4376 pred = insn_data[(int) icode].operand[0].predicate;
4377 if (preserve_subexpressions_p ()
4378 || ! (*pred) (subtarget, compare_mode))
4379 subtarget = gen_reg_rtx (compare_mode);
4381 pattern = GEN_FCN (icode) (subtarget);
4382 if (pattern)
4384 emit_insn (pattern);
4386 /* If we are converting to a wider mode, first convert to
4387 TARGET_MODE, then normalize. This produces better combining
4388 opportunities on machines that have a SIGN_EXTRACT when we are
4389 testing a single bit. This mostly benefits the 68k.
4391 If STORE_FLAG_VALUE does not have the sign bit set when
4392 interpreted in COMPARE_MODE, we can do this conversion as
4393 unsigned, which is usually more efficient. */
4394 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4396 convert_move (target, subtarget,
4397 (GET_MODE_BITSIZE (compare_mode)
4398 <= HOST_BITS_PER_WIDE_INT)
4399 && 0 == (STORE_FLAG_VALUE
4400 & ((HOST_WIDE_INT) 1
4401 << (GET_MODE_BITSIZE (compare_mode) -1))));
4402 op0 = target;
4403 compare_mode = target_mode;
4405 else
4406 op0 = subtarget;
4408 /* If we want to keep subexpressions around, don't reuse our
4409 last target. */
4411 if (preserve_subexpressions_p ())
4412 subtarget = 0;
4414 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4415 we don't have to do anything. */
4416 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4418 /* STORE_FLAG_VALUE might be the most negative number, so write
4419 the comparison this way to avoid a compile-time warning. */
4420 else if (- normalizep == STORE_FLAG_VALUE)
4421 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4423 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4424 makes it hard to use a value of just the sign bit due to
4425 ANSI integer constant typing rules. */
4426 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4427 && (STORE_FLAG_VALUE
4428 & ((HOST_WIDE_INT) 1
4429 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4430 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4431 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4432 subtarget, normalizep == 1);
4433 else if (STORE_FLAG_VALUE & 1)
4435 op0 = expand_and (op0, const1_rtx, subtarget);
4436 if (normalizep == -1)
4437 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4439 else
4440 abort ();
4442 /* If we were converting to a smaller mode, do the
4443 conversion now. */
4444 if (target_mode != compare_mode)
4446 convert_move (target, op0, 0);
4447 return target;
4449 else
4450 return op0;
4454 delete_insns_since (last);
4456 /* If expensive optimizations, use different pseudo registers for each
4457 insn, instead of reusing the same pseudo. This leads to better CSE,
4458 but slows down the compiler, since there are more pseudos. */
4459 subtarget = (!flag_expensive_optimizations
4460 && (target_mode == mode)) ? target : NULL_RTX;
4462 /* If we reached here, we can't do this with a scc insn. However, there
4463 are some comparisons that can be done directly. For example, if
4464 this is an equality comparison of integers, we can try to exclusive-or
4465 (or subtract) the two operands and use a recursive call to try the
4466 comparison with zero. Don't do any of these cases if branches are
4467 very cheap. */
4469 if (BRANCH_COST > 0
4470 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4471 && op1 != const0_rtx)
4473 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4474 OPTAB_WIDEN);
4476 if (tem == 0)
4477 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4478 OPTAB_WIDEN);
4479 if (tem != 0)
4480 tem = emit_store_flag (target, code, tem, const0_rtx,
4481 mode, unsignedp, normalizep);
4482 if (tem == 0)
4483 delete_insns_since (last);
4484 return tem;
4487 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4488 the constant zero. Reject all other comparisons at this point. Only
4489 do LE and GT if branches are expensive since they are expensive on
4490 2-operand machines. */
4492 if (BRANCH_COST == 0
4493 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4494 || (code != EQ && code != NE
4495 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4496 return 0;
4498 /* See what we need to return. We can only return a 1, -1, or the
4499 sign bit. */
4501 if (normalizep == 0)
4503 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4504 normalizep = STORE_FLAG_VALUE;
4506 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4507 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4508 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4510 else
4511 return 0;
4514 /* Try to put the result of the comparison in the sign bit. Assume we can't
4515 do the necessary operation below. */
4517 tem = 0;
4519 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4520 the sign bit set. */
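  /* E.g. A = 0: 0 | (0 - 1) = -1, sign bit set; A = 5: 5 | 4 = 5,
     sign bit clear; A = -3: -3 | -4 = -3, sign bit set.  */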
4522 if (code == LE)
4524 /* This is destructive, so SUBTARGET can't be OP0. */
4525 if (rtx_equal_p (subtarget, op0))
4526 subtarget = 0;
4528 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4529 OPTAB_WIDEN);
4530 if (tem)
4531 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4532 OPTAB_WIDEN);
4535 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4536 number of bits in the mode of OP0, minus one. */
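  /* E.g. in an 8-bit mode: A = 5 gives (5 >> 7) - 5 = -5, sign bit
     set; A = 0 gives 0; A = -3 gives -1 - (-3) = 2, sign bit clear.  */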
4538 if (code == GT)
4540 if (rtx_equal_p (subtarget, op0))
4541 subtarget = 0;
4543 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4544 size_int (GET_MODE_BITSIZE (mode) - 1),
4545 subtarget, 0);
4546 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4547 OPTAB_WIDEN);
4550 if (code == EQ || code == NE)
4552 /* For EQ or NE, one way to do the comparison is to apply an operation
4553 that converts the operand into a positive number if it is non-zero
4554 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4555 for NE we negate. This puts the result in the sign bit. Then we
4556 normalize with a shift, if needed.
4558 Two operations that can do the above actions are ABS and FFS, so try
4559 them. If that doesn't work, and MODE is smaller than a full word,
4560 we can use zero-extension to the wider mode (an unsigned conversion)
4561 as the operation. */
4563 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4564 that is compensated by the subsequent overflow when subtracting
4565 one / negating. */
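      /* E.g. for EQ in an 8-bit mode: abs (0) - 1 = -1 sets the sign
	 bit; abs (A) - 1 for non-zero A lies in [0, 127] and clears it,
	 the INT_MIN case included, since abs (-128) wraps to -128 and
	 -128 - 1 overflows to 127.  For NE, -abs (A) is negative
	 exactly when A is non-zero.  */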
4567 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4568 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4569 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4570 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4571 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4573 op0 = protect_from_queue (op0, 0);
4574 tem = convert_modes (word_mode, mode, op0, 1);
4575 mode = word_mode;
4578 if (tem != 0)
4580 if (code == EQ)
4581 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4582 0, OPTAB_WIDEN);
4583 else
4584 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4587 /* If we couldn't do it that way, for NE we can "or" the two's complement
4588 of the value with itself. For EQ, we take the one's complement of
4589 that "or", which is an extra insn, so we only handle EQ if branches
4590 are expensive. */
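      /* E.g. in an 8-bit mode: A = 0 gives (-0) | 0 = 0, sign bit
	 clear; A = 5 gives 0xfb | 0x05 = 0xff, sign bit set; even
	 A = -128 gives 0x80 | 0x80 = 0x80, sign bit set.  For EQ the
	 extra one's complement flips the sign bit, so it is set only
	 for A == 0.  */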
4592 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4594 if (rtx_equal_p (subtarget, op0))
4595 subtarget = 0;
4597 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4598 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4599 OPTAB_WIDEN);
4601 if (tem && code == EQ)
4602 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4606 if (tem && normalizep)
4607 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4608 size_int (GET_MODE_BITSIZE (mode) - 1),
4609 subtarget, normalizep == 1);
4611 if (tem)
4613 if (GET_MODE (tem) != target_mode)
4615 convert_move (target, tem, 0);
4616 tem = target;
4618 else if (!subtarget)
4620 emit_move_insn (target, tem);
4621 tem = target;
4624 else
4625 delete_insns_since (last);
4627 return tem;
4630 /* Like emit_store_flag, but always succeeds. */
4632 rtx
4633 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4634 rtx target;
4635 enum rtx_code code;
4636 rtx op0, op1;
4637 enum machine_mode mode;
4638 int unsignedp;
4639 int normalizep;
4641 rtx tem, label;
4643 /* First see if emit_store_flag can do the job. */
4644 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4645 if (tem != 0)
4646 return tem;
4648 if (normalizep == 0)
4649 normalizep = 1;
4651 /* If this failed, we have to do this with set/compare/jump/set code. */
4653 if (GET_CODE (target) != REG
4654 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4655 target = gen_reg_rtx (GET_MODE (target));
4657 emit_move_insn (target, const1_rtx);
4658 label = gen_label_rtx ();
4659 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, 0,
4660 NULL_RTX, label);
4662 emit_move_insn (target, const0_rtx);
4663 emit_label (label);
4665 return target;
4668 /* Perform a possibly multi-word comparison and conditional jump to LABEL
4669 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4671 The algorithm is based on the code in expr.c:do_jump.
4673 Note that this does not perform a general comparison. Only variants
4674 generated within expmed.c are correctly handled; others abort (but could
4675 be handled if needed). */
4677 static void
4678 do_cmp_and_jump (arg1, arg2, op, mode, label)
4679 rtx arg1, arg2, label;
4680 enum rtx_code op;
4681 enum machine_mode mode;
4683 /* If this mode is an integer too wide to compare properly,
4684 compare word by word. Rely on cse to optimize constant cases. */
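  /* E.g. a DImode LTU on a 32-bit target takes the word-by-word path
     below: it is rewritten as an unsigned greater-than test with the
     operands swapped, branching to LABEL when true and falling
     through to label2 otherwise.  */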
4686 if (GET_MODE_CLASS (mode) == MODE_INT
4687 && ! can_compare_p (op, mode, ccp_jump))
4689 rtx label2 = gen_label_rtx ();
4691 switch (op)
4693 case LTU:
4694 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4695 break;
4697 case LEU:
4698 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4699 break;
4701 case LT:
4702 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4703 break;
4705 case GT:
4706 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4707 break;
4709 case GE:
4710 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4711 break;
4713 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4714 those are the only equality operations we do. */
4715 case EQ:
4716 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4717 abort ();
4718 do_jump_by_parts_equality_rtx (arg1, label2, label);
4719 break;
4721 case NE:
4722 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4723 abort ();
4724 do_jump_by_parts_equality_rtx (arg1, label, label2);
4725 break;
4727 default:
4728 abort ();
4731 emit_label (label2);
4733 else
4735 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, 0, label);