/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "real.h"
#include "recog.h"

static void store_fixed_bit_field	PARAMS ((rtx, unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT, rtx,
						 unsigned int));
static void store_split_bit_field	PARAMS ((rtx, unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT, rtx,
						 unsigned int));
static rtx extract_fixed_bit_field	PARAMS ((enum machine_mode, rtx,
						 unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT,
						 rtx, int, unsigned int));
static rtx mask_rtx			PARAMS ((enum machine_mode, int,
						 int, int));
static rtx lshift_value			PARAMS ((enum machine_mode, rtx,
						 int, int));
static rtx extract_split_bit_field	PARAMS ((rtx, unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT, int,
						 unsigned int));
static void do_cmp_and_jump		PARAMS ((rtx, rtx, enum rtx_code,
						 enum machine_mode, rtx));

/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;
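
/* Illustrative sketch (not part of the original source): the kind of
   branch-free sequence these flags are probing for, here a signed
   divide by 8 on a 32-bit two's-complement target:

	t = x >> 31;		(0 or -1: copies of the sign bit)
	t = t & 7;		(bias: 7 if x < 0, else 0)
	q = (x + t) >> 3;	(the shift now rounds toward zero)

   If rtx_cost rates a DIV or MOD by a power of two at no more than two
   additions, the expanders emit the operation directly rather than a
   compare-and-branch around a correction step.  */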

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];

void
init_expmed ()
{
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
				       gen_rtx_ASHIFT (word_mode, reg,
						       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_PLUS (word_mode,
					    gen_rtx_MULT (word_mode,
							  reg, const0_rtx),
					    reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_MINUS (word_mode,
					     gen_rtx_MULT (word_mode,
							   reg, const0_rtx),
					     reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
	shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
	shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
	shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
	{
	  mul_widen_cost[(int) wider_mode]
	    = rtx_cost (gen_rtx_MULT (wider_mode,
				      gen_rtx_ZERO_EXTEND (wider_mode, reg),
				      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
			SET);
	  mul_highpart_cost[(int) mode]
	    = rtx_cost (gen_rtx_TRUNCATE
			(mode,
			 gen_rtx_LSHIFTRT (wider_mode,
					   gen_rtx_MULT (wider_mode,
							 gen_rtx_ZERO_EXTEND
							 (wider_mode, reg),
							 gen_rtx_ZERO_EXTEND
							 (wider_mode, reg)),
					   GEN_INT (GET_MODE_BITSIZE (mode)))),
			SET);
	}
    }

  end_sequence ();
}

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
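
/* Illustrative example (not part of the original source):
   negate_rtx (SImode, GEN_INT (5)) is folded by
   simplify_unary_operation to (const_int -5) with no code emitted,
   while negating a pseudo register falls through to expand_unop,
   which emits a NEG insn (or an equivalent sequence) and returns
   its result.  */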

/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
   else, we use the mode of operand 3.  */

rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     enum machine_mode fieldmode;
     rtx value;
     unsigned int align;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
#ifdef HAVE_insv
  unsigned HOST_WIDE_INT insv_bitsize;
  enum machine_mode op_mode;

  op_mode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
  if (op_mode == VOIDmode)
    op_mode = word_mode;
  insv_bitsize = GET_MODE_BITSIZE (op_mode);
#endif

  /* It is wrong to have align==0, since every object is aligned at
     least at a bit boundary.  This usually means a bug elsewhere.  */
  if (align == 0)
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
	 right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  if (bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
	  ? (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
	     || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
	  : (! SLOW_UNALIGNED_ACCESS (fieldmode, align)
	     || (offset * BITS_PER_UNIT % bitsize == 0
		 && align % GET_MODE_BITSIZE (fieldmode) == 0)))
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0))
    {
      if (GET_MODE (op0) != fieldmode)
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == fieldmode
		  || GET_MODE_CLASS (fieldmode) == MODE_INT
		  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (fieldmode, op0,
				  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
				  + (offset * UNITS_PER_WORD));
	  else
	    op0 = change_address (op0, fieldmode,
				  plus_constant (XEXP (op0, 0), offset));
	}
      emit_move_insn (op0, value);
      return value;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = change_address (op0, imode, NULL_RTX);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
	  != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
	value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
	{
	  if (GET_MODE (SUBREG_REG (op0)) == fieldmode
	      || GET_MODE_CLASS (fieldmode) == MODE_INT
	      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
	    op0 = SUBREG_REG (op0);
	  else
	    /* Else we've got some float mode source being extracted into
	       a different float mode destination -- this combination of
	       subregs results in Severe Tire Damage.  */
	    abort ();
	}

      emit_insn (GEN_FCN (icode)
		 (gen_rtx_SUBREG (fieldmode, op0,
				  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
				  + (offset * UNITS_PER_WORD)),
		  value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force will
	 result in an abort.  */
      fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  unsigned int wordnum = (backwards ? nwords - i - 1 : i);
	  unsigned int bit_offset = (backwards
				     ? MAX ((int) bitsize - ((int) i + 1)
					    * BITS_PER_WORD,
					    0)
				     : (int) i * BITS_PER_WORD);

	  store_bit_field (op0, MIN (BITS_PER_WORD,
				     bitsize - i * BITS_PER_WORD),
			   bitnum + bit_offset, word_mode,
			   operand_subword_force (value, wordnum,
						  (GET_MODE (value) == VOIDmode
						   ? fieldmode
						   : GET_MODE (value))),
			   align, total_size);
	}
      return value;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    {
	      /* Since this is a destination (lvalue), we can't copy it to a
		 pseudo.  We can trivially remove a SUBREG that does not
		 change the size of the operand.  Such a SUBREG may have been
		 added above.  Otherwise, abort.  */
	      if (GET_CODE (op0) == SUBREG
		  && (GET_MODE_SIZE (GET_MODE (op0))
		      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
		op0 = SUBREG_REG (op0);
	      else
		abort ();
	    }
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
	value = copy_to_reg (value);
      value = gen_rtx_SUBREG (word_mode, value, 0);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

#ifdef HAVE_insv
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (insv_bitsize >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos > insv_bitsize)))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode;
      int save_volatile_ok = volatile_ok;

      maxmode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
      if (maxmode == VOIDmode)
	maxmode = word_mode;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
	 into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
	 de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
	  && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
		(op0, VOIDmode)))
	{
	  rtx tempreg;
	  enum machine_mode bestmode;

	  /* Get the mode to use for inserting into this field.  If OP0 is
	     BLKmode, get the smallest mode consistent with the alignment.  If
	     OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
	     mode.  Otherwise, use the smallest mode containing the field.  */

	  if (GET_MODE (op0) == BLKmode
	      || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
	    bestmode
	      = get_best_mode (bitsize, bitnum, align, maxmode,
			       MEM_VOLATILE_P (op0));
	  else
	    bestmode = GET_MODE (op0);

	  if (bestmode == VOIDmode
	      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
		  && GET_MODE_BITSIZE (bestmode) > align))
	    goto insv_loses;

	  /* Adjust address to point to the containing unit of that mode.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  /* Compute offset as multiple of this unit, counting in bytes.  */
	  offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  bitpos = bitnum % unit;
	  op0 = change_address (op0, bestmode,
				plus_constant (XEXP (op0, 0), offset));

	  /* Fetch that unit, store the bitfield in it, then store
	     the unit.  */
	  tempreg = copy_to_reg (op0);
	  store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
			   align, total_size);
	  emit_move_insn (op0, tempreg);
	  return value;
	}
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
	xop0 = change_address (xop0, byte_mode,
			       plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
		{
		  /* Avoid making subreg of a subreg, or of a mem.  */
		  if (GET_CODE (value1) != REG)
		    value1 = copy_to_reg (value1);
		  value1 = gen_rtx_SUBREG (maxmode, value1, 0);
		}
	      else
		value1 = gen_lowpart (maxmode, value1);
	    }
	  else if (!CONSTANT_P (value))
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    abort ();
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
	     (value1, maxmode)))
	value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	emit_insn (pat);
      else
	{
	  delete_insns_since (last);
	  store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
	}
    }
  else
    insv_loses:
#endif
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
  return value;
}
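
/* Illustrative example (not part of the original source): for

	struct { unsigned int a : 3; unsigned int b : 9; } s;
	s.b = x;

   store_field arrives here with BITSIZE 9 and BITNUM 3.  A target with
   an insv pattern (e.g. the m68k's bit-field insert) can do the store
   in one insn; otherwise store_fixed_bit_field below emits the
   mask-and-ior sequence.  */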

/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have.

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     register rtx value;
     unsigned int struct_align;
{
  register enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  if (! SLOW_UNALIGNED_ACCESS (word_mode, struct_align))
    struct_align = BIGGEST_ALIGNMENT;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
	abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos,
				 value, BITS_PER_WORD);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    struct_align, word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0,
				 bitsize, bitpos + offset * BITS_PER_UNIT,
				 value, struct_align);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	{
	  if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
	      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
	    value = gen_lowpart (mode, value);
	  else
	    value = convert_to_mode (mode, value, 1);
	}

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
			 subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
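
/* Illustrative sketch (not part of the original source): on a
   little-endian target the sequence built above is equivalent to

	word = (word & ~(MASK << bitpos))	(skipped if VALUE is all ones)
	     | ((value & MASK) << bitpos);	(skipped if VALUE is zero)

   where MASK is ((1 << bitsize) - 1); the two mask constants come from
   mask_rtx and the shifted constant case from lshift_value below.  */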

/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.
   ALIGN is the known alignment of OP0.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     unsigned HOST_WIDE_INT bitsize, bitpos;
     rtx value;
     unsigned int align;
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.

	       ??? We have no idea what the alignment of VALUE is, so
	       we have to use a guess.  */
	    part
	      = extract_fixed_bit_field
		(word_mode, value, 0, thissize,
		 total_bits - bitsize + bitsdone, NULL_RTX, 1,
		 GET_MODE (value) == VOIDmode
		 ? UNITS_PER_WORD
		 : (GET_MODE (value) == BLKmode
		    ? 1 : GET_MODE_ALIGNMENT (GET_MODE (value))));
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part
	      = extract_fixed_bit_field
		(word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
		 GET_MODE (value) == VOIDmode
		 ? UNITS_PER_WORD
		 : (GET_MODE (value) == BLKmode
		    ? 1 : GET_MODE_ALIGNMENT (GET_MODE (value))));
	}

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
	 store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
			     thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
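
/* Illustrative example (not part of the original source): storing an
   11-bit field at bit position 27 of a 32-bit, little-endian register
   pair takes two iterations of the loop above:

	pass 1:  offset 0, thispos 27, thissize 5
		 bits 0..4 of VALUE  -> bits 27..31 of word 0
	pass 2:  offset 1, thispos 0,  thissize 6
		 bits 5..10 of VALUE -> bits 0..5 of word 1

   so neither store_fixed_bit_field call crosses a word boundary.  */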

/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
		   target, mode, tmode, align, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     unsigned int align;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
#ifdef HAVE_extv
  unsigned HOST_WIDE_INT extv_bitsize;
  enum machine_mode extv_mode;
#endif
#ifdef HAVE_extzv
  unsigned HOST_WIDE_INT extzv_bitsize;
  enum machine_mode extzv_mode;
#endif

#ifdef HAVE_extv
  extv_mode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
  if (extv_mode == VOIDmode)
    extv_mode = word_mode;
  extv_bitsize = GET_MODE_BITSIZE (extv_mode);
#endif

#ifdef HAVE_extzv
  extzv_mode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
  if (extzv_mode == VOIDmode)
    extzv_mode = word_mode;
  extzv_bitsize = GET_MODE_BITSIZE (extzv_mode);
#endif

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_BYTE (op0) / UNITS_PER_WORD;

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
	{
	  bitpos += inner_size - outer_size;
	  if (bitpos > unit)
	    {
	      offset += (bitpos / unit);
	      bitpos %= unit;
	    }
	}

      op0 = SUBREG_REG (op0);
    }

  if (GET_CODE (op0) == REG
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = change_address (op0, imode, NULL_RTX);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (((GET_CODE (op0) != MEM
	&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op0))))
       || (GET_CODE (op0) == MEM
	   && (! SLOW_UNALIGNED_ACCESS (mode, align)
	       || (offset * BITS_PER_UNIT % bitsize == 0
		   && align % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	   && bitpos % BITS_PER_WORD == 0)
	  || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
	      /* ??? The big endian test here is wrong.  This is correct
		 if the value is in a register, and if mode_for_size is not
		 the same mode as op0.  This causes us to get unnecessarily
		 inefficient code from the Thumb port when -mbig-endian.  */
	      && (BYTES_BIG_ENDIAN
		  ? bitpos + bitsize == BITS_PER_WORD
		  : bitpos == 0))))
    {
      enum machine_mode mode1
	= (VECTOR_MODE_P (tmode) ? mode
	   : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));

      if (mode1 != GET_MODE (op0))
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == mode1
		  || GET_MODE_CLASS (mode1) == MODE_INT
		  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }

	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (mode1, op0,
				  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
				  + (offset * UNITS_PER_WORD));
	  else
	    op0 = change_address (op0, mode1,
				  plus_constant (XEXP (op0, 0), offset));
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  unsigned int wordnum
	    = (WORDS_BIG_ENDIAN
	       ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
	       : i);
	  /* Offset from start of field in OP0.  */
	  unsigned int bit_offset = (WORDS_BIG_ENDIAN
				     ? MAX (0, ((int) bitsize - ((int) i + 1)
						* (int) BITS_PER_WORD))
				     : (int) i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset, 1, target_part, mode,
				 word_mode, align, total_size);

	  if (target_part == 0)
	    abort ();

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      unsigned int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		{
		  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
		  rtx target_part = operand_subword (target, wordnum, 1,
						     VOIDmode);
		  emit_move_insn (target_part, const0_rtx);
		}
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			   NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
		   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    op0 = copy_to_reg (op0);
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
	  && (extzv_bitsize >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > extzv_bitsize)))
	{
	  unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0;
	  rtx xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode;

	  maxmode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
	  if (maxmode == VOIDmode)
	    maxmode = word_mode;

	  if (GET_CODE (xop0) == MEM)
	    {
	      int save_volatile_ok = volatile_ok;
	      volatile_ok = 1;

	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum, align, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
			  && GET_MODE_BITSIZE (bestmode) > align))
		    goto extzv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));

	      volatile_ok = save_volatile_ok;
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extzv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extzv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* Now convert from counting within UNIT to counting in MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extzv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extzv (protect_from_queue (xtarget, 1),
			   xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 1, align);
	    }
	}
      else
	extzv_loses:
#endif
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 1, align);
    }

  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
	  && (extv_bitsize >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > extv_bitsize)))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0, xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode;

	  maxmode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
	  if (maxmode == VOIDmode)
	    maxmode = word_mode;

	  if (GET_CODE (xop0) == MEM)
	    {
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum, align, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
			  && GET_MODE_BITSIZE (bestmode) > align))
		    goto extv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* XBITPOS counts within a size of UNIT.
	     Adjust to count within a size of MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extv (protect_from_queue (xtarget, 1),
			  xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 0, align);
	    }
	}
      else
	extv_loses:
#endif
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 0, align);
    }

  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
	 integer mode of that size and then access it as a floating-point
	 value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
	{
	  target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
						   MODE_INT, 0),
				    target, unsignedp);
	  if (GET_CODE (target) != REG)
	    target = copy_to_reg (target);
	  return gen_rtx_SUBREG (tmode, target, 0);
	}
      else
	return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
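
/* Illustrative example (not part of the original source): reading the
   field stored in the example after store_bit_field above,

	x = s.b;

   reaches this function with BITSIZE 9, BITNUM 3 and UNSIGNEDP 1; a
   target with an extzv pattern can extract it in one insn, and
   otherwise extract_fixed_bit_field below emits a shift followed by
   an AND.  */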

/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
    it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
    (If OP0 is a register, it may be narrower than a full word,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have.  */

static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
			 target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     int unsignedp;
     unsigned int align;
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos,
					unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT, align,
			    word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
	 Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_2 (bitpos, 0);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && GET_CODE (target) == REG
			   && !REG_FUNCTION_VALUE_P (target)
			   ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#if 0
#ifdef SLOW_ZERO_EXTEND
	  /* Always generate an `and' if
	     we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
	     will combine fruitfully with the zero-extend.  */
	  || tmode != mode
#endif
#endif
	  )
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
	= build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
		       && ! REG_FUNCTION_VALUE_P (target)
		       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
		       target, 0);
}
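
/* Illustrative sketch (not part of the original source): once BITPOS
   has been converted to count from the lsb, the two cases above reduce
   to, in C,

	unsigned:  (word >> bitpos) & ((1 << bitsize) - 1)
	signed:    ((int) (word << (N - bitpos - bitsize))) >> (N - bitsize)

   where N is GET_MODE_BITSIZE (mode); the left shift moves the field's
   sign bit into the msb so the arithmetic right shift sign-extends.  */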

/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
		>> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
		 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
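
/* Illustrative example (not part of the original source), assuming a
   32-bit HOST_WIDE_INT: mask_rtx (SImode, 8, 4, 0) computes

	masklow = (-1 << 8) & (~0U >> 20) = 0x00000F00

   i.e. four ones shifted past eight zeros, and with COMPLEMENT nonzero
   the result is ~0x00000F00 = 0xFFFFF0FF -- exactly the two constants
   store_fixed_bit_field needs for its and and ior steps.  */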

/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
1765 /* Extract a bit field that is split across two words
1766 and return an RTX for the result.
1768 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1769 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1770 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
1772 ALIGN is the known alignment of OP0. This is also the size of the
1773 memory objects to be used. */
1775 static rtx
1776 extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
1777 rtx op0;
1778 unsigned HOST_WIDE_INT bitsize, bitpos;
1779 int unsignedp;
1780 unsigned int align;
1782 unsigned int unit;
1783 unsigned int bitsdone = 0;
1784 rtx result = NULL_RTX;
1785 int first = 1;
1787 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can handle only that
1788 much at a time. */
1789 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1790 unit = BITS_PER_WORD;
1791 else
1792 unit = MIN (align, BITS_PER_WORD);
1794 while (bitsdone < bitsize)
1796 unsigned HOST_WIDE_INT thissize;
1797 rtx part, word;
1798 unsigned HOST_WIDE_INT thispos;
1799 unsigned HOST_WIDE_INT offset;
1801 offset = (bitpos + bitsdone) / unit;
1802 thispos = (bitpos + bitsdone) % unit;
1804 /* THISSIZE must not overrun a word boundary. Otherwise,
1805 extract_fixed_bit_field will call us again, and we will mutually
1806 recurse forever. */
1807 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1808 thissize = MIN (thissize, unit - thispos);
1810 /* If OP0 is a register, then handle OFFSET here.
1812 When handling multiword bitfields, extract_bit_field may pass
1813 down a word_mode SUBREG of a larger REG for a bitfield that actually
1814 crosses a word boundary. Thus, for a SUBREG, we must find
1815 the current word starting from the base register. */
1816 if (GET_CODE (op0) == SUBREG)
1818 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1819 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1820 GET_MODE (SUBREG_REG (op0)));
1821 offset = 0;
1823 else if (GET_CODE (op0) == REG)
1825 word = operand_subword_force (op0, offset, GET_MODE (op0));
1826 offset = 0;
1828 else
1829 word = op0;
1831 /* Extract the parts in bit-counting order,
1832 whose meaning is determined by BYTES_BIG_ENDIAN.
1833 OFFSET is in UNITs, and UNIT is in bits.
1834 extract_fixed_bit_field wants offset in bytes. */
1835 part = extract_fixed_bit_field (word_mode, word,
1836 offset * unit / BITS_PER_UNIT,
1837 thissize, thispos, 0, 1, align);
1838 bitsdone += thissize;
1840 /* Shift this part into place for the result. */
1841 if (BYTES_BIG_ENDIAN)
1843 if (bitsize != bitsdone)
1844 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1845 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1847 else
1849 if (bitsdone != thissize)
1850 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1851 build_int_2 (bitsdone - thissize, 0), 0, 1);
1854 if (first)
1855 result = part;
1856 else
1857 /* Combine the parts with bitwise or. This works
1858 because we extracted each part as an unsigned bit field. */
1859 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1860 OPTAB_LIB_WIDEN);
1862 first = 0;
1865 /* Unsigned bit field: we are done. */
1866 if (unsignedp)
1867 return result;
1868 /* Signed bit field: sign-extend with two arithmetic shifts. */
1869 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1870 build_int_2 (BITS_PER_WORD - bitsize, 0),
1871 NULL_RTX, 0);
1872 return expand_shift (RSHIFT_EXPR, word_mode, result,
1873 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
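/* An illustrative sketch of the reassembly above (editorial, hence kept
   out of compilation; assumes a little-endian target, 32-bit words, and an
   unsigned 12-bit field starting at bit 28 of a REG pair): the loop pulls
   4 bits out of word 0 and 8 bits out of word 1, then ors the parts
   together after shifting each into place.  */
#if 0
static unsigned int
extract_split_sketch (w0, w1)
     unsigned int w0, w1;
{
  unsigned int lo = (w0 >> 28) & 0xf;	/* thissize = 4, thispos = 28 */
  unsigned int hi = w1 & 0xff;		/* thissize = 8, thispos = 0 */
  return lo | (hi << 4);		/* shift by bitsdone - thissize */
}
#endif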
1876 /* Add INC into TARGET. */
1878 void
1879 expand_inc (target, inc)
1880 rtx target, inc;
1882 rtx value = expand_binop (GET_MODE (target), add_optab,
1883 target, inc,
1884 target, 0, OPTAB_LIB_WIDEN);
1885 if (value != target)
1886 emit_move_insn (target, value);
1889 /* Subtract DEC from TARGET. */
1891 void
1892 expand_dec (target, dec)
1893 rtx target, dec;
1895 rtx value = expand_binop (GET_MODE (target), sub_optab,
1896 target, dec,
1897 target, 0, OPTAB_LIB_WIDEN);
1898 if (value != target)
1899 emit_move_insn (target, value);
1902 /* Output a shift instruction for expression code CODE,
1903 with SHIFTED being the rtx for the value to shift,
1904 and AMOUNT the tree for the amount to shift by.
1905 Store the result in the rtx TARGET, if that is convenient.
1906 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1907 Return the rtx for where the value is. */
1909 rtx
1910 expand_shift (code, mode, shifted, amount, target, unsignedp)
1911 enum tree_code code;
1912 register enum machine_mode mode;
1913 rtx shifted;
1914 tree amount;
1915 register rtx target;
1916 int unsignedp;
1918 register rtx op1, temp = 0;
1919 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1920 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1921 int try;
1923 /* Previously we detected shift counts computed by NEGATE_EXPR
1924 and shifted in the other direction, but that does not work
1925 on all machines. */
1927 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1929 #ifdef SHIFT_COUNT_TRUNCATED
1930 if (SHIFT_COUNT_TRUNCATED)
1932 if (GET_CODE (op1) == CONST_INT
1933 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1934 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1935 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1936 % GET_MODE_BITSIZE (mode));
1937 else if (GET_CODE (op1) == SUBREG
1938 && SUBREG_BYTE (op1) == 0)
1939 op1 = SUBREG_REG (op1);
1941 #endif
1943 if (op1 == const0_rtx)
1944 return shifted;
1946 for (try = 0; temp == 0 && try < 3; try++)
1948 enum optab_methods methods;
1950 if (try == 0)
1951 methods = OPTAB_DIRECT;
1952 else if (try == 1)
1953 methods = OPTAB_WIDEN;
1954 else
1955 methods = OPTAB_LIB_WIDEN;
1957 if (rotate)
1959 /* Widening does not work for rotation. */
1960 if (methods == OPTAB_WIDEN)
1961 continue;
1962 else if (methods == OPTAB_LIB_WIDEN)
1964 /* If we have been unable to open-code this by a rotation,
1965 do it as the IOR of two shifts. I.e., to rotate A
1966 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1967 where C is the bitsize of A.
1969 It is theoretically possible that the target machine might
1970 not be able to perform either shift and hence we would
1971 be making two libcalls rather than just the one for the
1972 shift (similarly if IOR could not be done). We will allow
1973 this extremely unlikely lossage to avoid complicating the
1974 code below. */
1976 rtx subtarget = target == shifted ? 0 : target;
1977 rtx temp1;
1978 tree type = TREE_TYPE (amount);
1979 tree new_amount = make_tree (type, op1);
1980 tree other_amount
1981 = fold (build (MINUS_EXPR, type,
1982 convert (type,
1983 build_int_2 (GET_MODE_BITSIZE (mode),
1984 0)),
1985 amount));
1987 shifted = force_reg (mode, shifted);
1989 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1990 mode, shifted, new_amount, subtarget, 1);
1991 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1992 mode, shifted, other_amount, 0, 1);
1993 return expand_binop (mode, ior_optab, temp, temp1, target,
1994 unsignedp, methods);
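/* The identity used above, as a plain C sketch (editorial; assumes a
   32-bit unsigned int and 0 < n < 32): a left rotate is the IOR of a
   left shift and a complementary logical right shift, which is exactly
   what the two expand_shift calls and the ior_optab call construct.  */
#if 0
static unsigned int
rotl32_sketch (a, n)
     unsigned int a, n;
{
  return (a << n) | (a >> (32 - n));
}
#endif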
1997 temp = expand_binop (mode,
1998 left ? rotl_optab : rotr_optab,
1999 shifted, op1, target, unsignedp, methods);
2001 /* If we don't have the rotate, but we are rotating by a constant
2002 that is in range, try a rotate in the opposite direction. */
2004 if (temp == 0 && GET_CODE (op1) == CONST_INT
2005 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
2006 temp = expand_binop (mode,
2007 left ? rotr_optab : rotl_optab,
2008 shifted,
2009 GEN_INT (GET_MODE_BITSIZE (mode)
2010 - INTVAL (op1)),
2011 target, unsignedp, methods);
2013 else if (unsignedp)
2014 temp = expand_binop (mode,
2015 left ? ashl_optab : lshr_optab,
2016 shifted, op1, target, unsignedp, methods);
2018 /* Do arithmetic shifts.
2019 Also, if we are going to widen the operand, we can just as well
2020 use an arithmetic right-shift instead of a logical one. */
2021 if (temp == 0 && ! rotate
2022 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2024 enum optab_methods methods1 = methods;
2026 /* If trying to widen a log shift to an arithmetic shift,
2027 don't accept an arithmetic shift of the same size. */
2028 if (unsignedp)
2029 methods1 = OPTAB_MUST_WIDEN;
2031 /* Arithmetic shift */
2033 temp = expand_binop (mode,
2034 left ? ashl_optab : ashr_optab,
2035 shifted, op1, target, unsignedp, methods1);
2038 /* We used to try extzv here for logical right shifts, but that was
2039 only useful for one machine, the VAX, and caused poor code
2040 generation there for lshrdi3, so the code was deleted and a
2041 define_expand for lshrsi3 was added to vax.md. */
2044 if (temp == 0)
2045 abort ();
2046 return temp;
2049 enum alg_code { alg_zero, alg_m, alg_shift,
2050 alg_add_t_m2, alg_sub_t_m2,
2051 alg_add_factor, alg_sub_factor,
2052 alg_add_t2_m, alg_sub_t2_m,
2053 alg_add, alg_subtract, alg_factor, alg_shiftop };
2055 /* This structure records a sequence of operations.
2056 `ops' is the number of operations recorded.
2057 `cost' is their total cost.
2058 The operations are stored in `op' and the corresponding
2059 logarithms of the integer coefficients in `log'.
2061 These are the operations:
2062 alg_zero total := 0;
2063 alg_m total := multiplicand;
2064 alg_shift total := total * coeff;
2065 alg_add_t_m2 total := total + multiplicand * coeff;
2066 alg_sub_t_m2 total := total - multiplicand * coeff;
2067 alg_add_factor total := total * coeff + total;
2068 alg_sub_factor total := total * coeff - total;
2069 alg_add_t2_m total := total * coeff + multiplicand;
2070 alg_sub_t2_m total := total * coeff - multiplicand;
2072 The first operand must be either alg_zero or alg_m. */
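/* Worked example (editorial): for T = 10 = ((x * 4) + x) * 2, synth_mult
   can return ops = 3 with op[0] = alg_m (total := x), op[1] = alg_add_t2_m
   and log[1] = 2 (total := total*4 + x = 5x), and op[2] = alg_shift with
   log[2] = 1 (total := total*2 = 10x).  */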
2074 struct algorithm
2076 short cost;
2077 short ops;
2078 /* The size of the OP and LOG fields are not directly related to the
2079 word size, but the worst-case algorithms will be if we have few
2080 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2081 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2082 in total wordsize operations. */
2083 enum alg_code op[MAX_BITS_PER_WORD];
2084 char log[MAX_BITS_PER_WORD];
2085 };
2087 static void synth_mult PARAMS ((struct algorithm *,
2088 unsigned HOST_WIDE_INT,
2089 int));
2090 static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
2091 int, int,
2092 unsigned HOST_WIDE_INT *,
2093 int *, int *));
2094 static unsigned HOST_WIDE_INT invert_mod2n PARAMS ((unsigned HOST_WIDE_INT,
2095 int));
2096 /* Compute and return the best algorithm for multiplying by T.
2097 The algorithm must cost less than COST_LIMIT.
2098 If retval.cost >= COST_LIMIT, no algorithm was found and all
2099 other fields of the returned struct are undefined. */
2101 static void
2102 synth_mult (alg_out, t, cost_limit)
2103 struct algorithm *alg_out;
2104 unsigned HOST_WIDE_INT t;
2105 int cost_limit;
2107 int m;
2108 struct algorithm *alg_in, *best_alg;
2109 int cost;
2110 unsigned HOST_WIDE_INT q;
2112 /* Indicate that no algorithm is yet found. If no algorithm
2113 is found, this value will be returned and indicate failure. */
2114 alg_out->cost = cost_limit;
2116 if (cost_limit <= 0)
2117 return;
2119 /* t == 1 can be done in zero cost. */
2120 if (t == 1)
2122 alg_out->ops = 1;
2123 alg_out->cost = 0;
2124 alg_out->op[0] = alg_m;
2125 return;
2128 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2129 fail now. */
2130 if (t == 0)
2132 if (zero_cost >= cost_limit)
2133 return;
2134 else
2136 alg_out->ops = 1;
2137 alg_out->cost = zero_cost;
2138 alg_out->op[0] = alg_zero;
2139 return;
2143 /* We'll be needing a couple extra algorithm structures now. */
2145 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2146 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2148 /* If we have a group of zero bits at the low-order part of T, try
2149 multiplying by the remaining bits and then doing a shift. */
2151 if ((t & 1) == 0)
2153 m = floor_log2 (t & -t); /* m = number of low zero bits */
2154 if (m < BITS_PER_WORD)
2156 q = t >> m;
2157 cost = shift_cost[m];
2158 synth_mult (alg_in, q, cost_limit - cost);
2160 cost += alg_in->cost;
2161 if (cost < cost_limit)
2163 struct algorithm *x;
2164 x = alg_in, alg_in = best_alg, best_alg = x;
2165 best_alg->log[best_alg->ops] = m;
2166 best_alg->op[best_alg->ops] = alg_shift;
2167 cost_limit = cost;
2172 /* If we have an odd number, add or subtract one. */
2173 if ((t & 1) != 0)
2175 unsigned HOST_WIDE_INT w;
2177 for (w = 1; (w & t) != 0; w <<= 1)
2178 ;
2179 /* If T was -1, then W will be zero after the loop. This is another
2180 case where T ends with ...111. Handling this by multiplying by (T + 1)
2181 and subtracting 1 produces slightly better code and makes algorithm
2182 selection much faster than treating it like the ...0111 case
2183 below. */
2184 if (w == 0
2185 || (w > 2
2186 /* Reject the case where t is 3.
2187 Thus we prefer addition in that case. */
2188 && t != 3))
2190 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2192 cost = add_cost;
2193 synth_mult (alg_in, t + 1, cost_limit - cost);
2195 cost += alg_in->cost;
2196 if (cost < cost_limit)
2198 struct algorithm *x;
2199 x = alg_in, alg_in = best_alg, best_alg = x;
2200 best_alg->log[best_alg->ops] = 0;
2201 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2202 cost_limit = cost;
2205 else
2207 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2209 cost = add_cost;
2210 synth_mult (alg_in, t - 1, cost_limit - cost);
2212 cost += alg_in->cost;
2213 if (cost < cost_limit)
2215 struct algorithm *x;
2216 x = alg_in, alg_in = best_alg, best_alg = x;
2217 best_alg->log[best_alg->ops] = 0;
2218 best_alg->op[best_alg->ops] = alg_add_t_m2;
2219 cost_limit = cost;
2224 /* Look for factors of t of the form
2225 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2226 If we find such a factor, we can multiply by t using an algorithm that
2227 multiplies by q, shift the result by m and add/subtract it to itself.
2229 We search for large factors first and loop down, even if large factors
2230 are less probable than small; if we find a large factor we will find a
2231 good sequence quickly, and therefore be able to prune (by decreasing
2232 COST_LIMIT) the search. */
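/* Worked example (editorial): t = 45 factors as 5 * (2**3 + 1), so a
   multiply by 45 can be done by multiplying by 5 (itself a shift-add)
   and then computing total := (total << 3) + total, recorded here as
   alg_add_factor with log = 3.  */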
2234 for (m = floor_log2 (t - 1); m >= 2; m--)
2236 unsigned HOST_WIDE_INT d;
2238 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2239 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2241 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2242 synth_mult (alg_in, t / d, cost_limit - cost);
2244 cost += alg_in->cost;
2245 if (cost < cost_limit)
2247 struct algorithm *x;
2248 x = alg_in, alg_in = best_alg, best_alg = x;
2249 best_alg->log[best_alg->ops] = m;
2250 best_alg->op[best_alg->ops] = alg_add_factor;
2251 cost_limit = cost;
2253 /* Other factors will have been taken care of in the recursion. */
2254 break;
2257 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2258 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2260 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2261 synth_mult (alg_in, t / d, cost_limit - cost);
2263 cost += alg_in->cost;
2264 if (cost < cost_limit)
2266 struct algorithm *x;
2267 x = alg_in, alg_in = best_alg, best_alg = x;
2268 best_alg->log[best_alg->ops] = m;
2269 best_alg->op[best_alg->ops] = alg_sub_factor;
2270 cost_limit = cost;
2272 break;
2276 /* Try shift-and-add (load effective address) instructions,
2277 i.e. do a*3, a*5, a*9. */
2278 if ((t & 1) != 0)
2280 q = t - 1;
2281 q = q & -q;
2282 m = exact_log2 (q);
2283 if (m >= 0 && m < BITS_PER_WORD)
2285 cost = shiftadd_cost[m];
2286 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2288 cost += alg_in->cost;
2289 if (cost < cost_limit)
2291 struct algorithm *x;
2292 x = alg_in, alg_in = best_alg, best_alg = x;
2293 best_alg->log[best_alg->ops] = m;
2294 best_alg->op[best_alg->ops] = alg_add_t2_m;
2295 cost_limit = cost;
2299 q = t + 1;
2300 q = q & -q;
2301 m = exact_log2 (q);
2302 if (m >= 0 && m < BITS_PER_WORD)
2304 cost = shiftsub_cost[m];
2305 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2307 cost += alg_in->cost;
2308 if (cost < cost_limit)
2310 struct algorithm *x;
2311 x = alg_in, alg_in = best_alg, best_alg = x;
2312 best_alg->log[best_alg->ops] = m;
2313 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2314 cost_limit = cost;
2319 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2320 we have not found any algorithm. */
2321 if (cost_limit == alg_out->cost)
2322 return;
2324 /* If we are getting a too long sequence for `struct algorithm'
2325 to record, make this search fail. */
2326 if (best_alg->ops == MAX_BITS_PER_WORD)
2327 return;
2329 /* Copy the algorithm from temporary space to the space at alg_out.
2330 We avoid using structure assignment because the majority of
2331 best_alg is normally undefined, and this is a critical function. */
2332 alg_out->ops = best_alg->ops + 1;
2333 alg_out->cost = cost_limit;
2334 memcpy (alg_out->op, best_alg->op,
2335 alg_out->ops * sizeof *alg_out->op);
2336 memcpy (alg_out->log, best_alg->log,
2337 alg_out->ops * sizeof *alg_out->log);
2340 /* Perform a multiplication and return an rtx for the result.
2341 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2342 TARGET is a suggestion for where to store the result (an rtx).
2344 We check specially for a constant integer as OP1.
2345 If you want this check for OP0 as well, then before calling
2346 you should swap the two operands if OP0 would be constant. */
2348 rtx
2349 expand_mult (mode, op0, op1, target, unsignedp)
2350 enum machine_mode mode;
2351 register rtx op0, op1, target;
2352 int unsignedp;
2354 rtx const_op1 = op1;
2356 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2357 less than or equal in size to `unsigned int' this doesn't matter.
2358 If the mode is larger than `unsigned int', then synth_mult works only
2359 if the constant value exactly fits in an `unsigned int' without any
2360 truncation. This means that multiplying by negative values does
2361 not work; results are off by 2^32 on a 32 bit machine. */
2363 /* If we are multiplying in DImode, it may still be a win
2364 to try to work with shifts and adds. */
2365 if (GET_CODE (op1) == CONST_DOUBLE
2366 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2367 && HOST_BITS_PER_INT >= BITS_PER_WORD
2368 && CONST_DOUBLE_HIGH (op1) == 0)
2369 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2370 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2371 && GET_CODE (op1) == CONST_INT
2372 && INTVAL (op1) < 0)
2373 const_op1 = 0;
2375 /* We used to test optimize here, on the grounds that it's better to
2376 produce a smaller program when -O is not used.
2377 But this causes such a terrible slowdown sometimes
2378 that it seems better to use synth_mult always. */
2380 if (const_op1 && GET_CODE (const_op1) == CONST_INT
2381 && (unsignedp || ! flag_trapv))
2383 struct algorithm alg;
2384 struct algorithm alg2;
2385 HOST_WIDE_INT val = INTVAL (op1);
2386 HOST_WIDE_INT val_so_far;
2387 rtx insn;
2388 int mult_cost;
2389 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2391 /* Try to do the computation three ways: multiply by the negative of OP1
2392 and then negate, do the multiplication directly, or do multiplication
2393 by OP1 - 1. */
2395 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2396 mult_cost = MIN (12 * add_cost, mult_cost);
2398 synth_mult (&alg, val, mult_cost);
2400 /* This works only if the inverted value actually fits in an
2401 `unsigned int'. */
2402 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2404 synth_mult (&alg2, - val,
2405 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2406 if (alg2.cost + negate_cost < alg.cost)
2407 alg = alg2, variant = negate_variant;
2410 /* This proves very useful for division-by-constant. */
2411 synth_mult (&alg2, val - 1,
2412 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2413 if (alg2.cost + add_cost < alg.cost)
2414 alg = alg2, variant = add_variant;
2416 if (alg.cost < mult_cost)
2418 /* We found something cheaper than a multiply insn. */
2419 int opno;
2420 rtx accum, tem;
2421 enum machine_mode nmode;
2423 op0 = protect_from_queue (op0, 0);
2425 /* Avoid referencing memory over and over.
2426 For speed, but also for correctness when mem is volatile. */
2427 if (GET_CODE (op0) == MEM)
2428 op0 = force_reg (mode, op0);
2430 /* ACCUM starts out either as OP0 or as a zero, depending on
2431 the first operation. */
2433 if (alg.op[0] == alg_zero)
2435 accum = copy_to_mode_reg (mode, const0_rtx);
2436 val_so_far = 0;
2438 else if (alg.op[0] == alg_m)
2440 accum = copy_to_mode_reg (mode, op0);
2441 val_so_far = 1;
2443 else
2444 abort ();
2446 for (opno = 1; opno < alg.ops; opno++)
2448 int log = alg.log[opno];
2449 int preserve = preserve_subexpressions_p ();
2450 rtx shift_subtarget = preserve ? 0 : accum;
2451 rtx add_target
2452 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2453 && ! preserve)
2454 ? target : 0;
2455 rtx accum_target = preserve ? 0 : accum;
2457 switch (alg.op[opno])
2459 case alg_shift:
2460 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2461 build_int_2 (log, 0), NULL_RTX, 0);
2462 val_so_far <<= log;
2463 break;
2465 case alg_add_t_m2:
2466 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2467 build_int_2 (log, 0), NULL_RTX, 0);
2468 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2469 add_target
2470 ? add_target : accum_target);
2471 val_so_far += (HOST_WIDE_INT) 1 << log;
2472 break;
2474 case alg_sub_t_m2:
2475 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2476 build_int_2 (log, 0), NULL_RTX, 0);
2477 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2478 add_target
2479 ? add_target : accum_target);
2480 val_so_far -= (HOST_WIDE_INT) 1 << log;
2481 break;
2483 case alg_add_t2_m:
2484 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2485 build_int_2 (log, 0), shift_subtarget,
2486 0);
2487 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2488 add_target
2489 ? add_target : accum_target);
2490 val_so_far = (val_so_far << log) + 1;
2491 break;
2493 case alg_sub_t2_m:
2494 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2495 build_int_2 (log, 0), shift_subtarget,
2496 0);
2497 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2498 add_target
2499 ? add_target : accum_target);
2500 val_so_far = (val_so_far << log) - 1;
2501 break;
2503 case alg_add_factor:
2504 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2505 build_int_2 (log, 0), NULL_RTX, 0);
2506 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2507 add_target
2508 ? add_target : accum_target);
2509 val_so_far += val_so_far << log;
2510 break;
2512 case alg_sub_factor:
2513 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2514 build_int_2 (log, 0), NULL_RTX, 0);
2515 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2516 (add_target ? add_target
2517 : preserve ? 0 : tem));
2518 val_so_far = (val_so_far << log) - val_so_far;
2519 break;
2521 default:
2522 abort ();
2525 /* Write a REG_EQUAL note on the last insn so that we can cse
2526 multiplication sequences. Note that if ACCUM is a SUBREG,
2527 we've set the inner register and must properly indicate
2528 that. */
2530 tem = op0, nmode = mode;
2531 if (GET_CODE (accum) == SUBREG)
2533 nmode = GET_MODE (SUBREG_REG (accum));
2534 tem = gen_lowpart (nmode, op0);
2537 insn = get_last_insn ();
2538 set_unique_reg_note (insn,
2539 REG_EQUAL,
2540 gen_rtx_MULT (nmode, tem,
2541 GEN_INT (val_so_far)));
2544 if (variant == negate_variant)
2546 val_so_far = - val_so_far;
2547 accum = expand_unop (mode, neg_optab, accum, target, 0);
2549 else if (variant == add_variant)
2551 val_so_far = val_so_far + 1;
2552 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2555 if (val != val_so_far)
2556 abort ();
2558 return accum;
2562 /* This used to use umul_optab if unsigned, but for non-widening multiply
2563 there is no difference between signed and unsigned. */
2564 op0 = expand_binop (mode,
2565 ! unsignedp
2566 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2567 ? smulv_optab : smul_optab,
2568 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2569 if (op0 == 0)
2570 abort ();
2571 return op0;
2574 /* Return the smallest n such that 2**n >= X. */
2576 int
2577 ceil_log2 (x)
2578 unsigned HOST_WIDE_INT x;
2580 return floor_log2 (x - 1) + 1;
2583 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2584 replace division by D, and put the least significant N bits of the result
2585 in *MULTIPLIER_PTR and return the most significant bit.
2587 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT); the
2588 needed precision is in PRECISION (should be <= N).
2590 PRECISION should be as small as possible so this function can choose a
2591 multiplier more freely.
2593 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2594 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2596 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2597 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
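/* Worked example (editorial): for d = 7 with n = precision = 32, lgup = 3,
   mlow = 2^35/7 and mhigh = (2^35 + 2^3)/7 = 0x124924925.  The halving
   loop below cannot reduce that pair, so *POST_SHIFT_PTR becomes 3 and
   the multiplier needs 33 bits: the function stores the low 32 bits,
   0x24924925, and returns 1 for the top bit, which is why callers such as
   expand_divmod keep a separate fixup path for a nonzero return.  */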
2599 static
2600 unsigned HOST_WIDE_INT
2601 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2602 unsigned HOST_WIDE_INT d;
2603 int n;
2604 int precision;
2605 unsigned HOST_WIDE_INT *multiplier_ptr;
2606 int *post_shift_ptr;
2607 int *lgup_ptr;
2609 HOST_WIDE_INT mhigh_hi, mlow_hi;
2610 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2611 int lgup, post_shift;
2612 int pow, pow2;
2613 unsigned HOST_WIDE_INT nl, dummy1;
2614 HOST_WIDE_INT nh, dummy2;
2616 /* lgup = ceil(log2(divisor)); */
2617 lgup = ceil_log2 (d);
2619 if (lgup > n)
2620 abort ();
2622 pow = n + lgup;
2623 pow2 = n + lgup - precision;
2625 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2627 /* We could handle this with some effort, but this case is much better
2628 handled directly with a scc insn, so rely on caller using that. */
2629 abort ();
2632 /* mlow = 2^(N + lgup)/d */
2633 if (pow >= HOST_BITS_PER_WIDE_INT)
2635 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2636 nl = 0;
2638 else
2640 nh = 0;
2641 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2643 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2644 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2646 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2647 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2648 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2649 else
2650 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2651 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2652 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2654 if (mhigh_hi && nh - d >= d)
2655 abort ();
2656 if (mhigh_hi > 1 || mlow_hi > 1)
2657 abort ();
2658 /* assert that mlow < mhigh. */
2659 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2660 abort ();
2662 /* If precision == N, then mlow, mhigh exceed 2^N
2663 (but they do not exceed 2^(N+1)). */
2665 /* Reduce to lowest terms */
2666 for (post_shift = lgup; post_shift > 0; post_shift--)
2668 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2669 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2670 if (ml_lo >= mh_lo)
2671 break;
2673 mlow_hi = 0;
2674 mlow_lo = ml_lo;
2675 mhigh_hi = 0;
2676 mhigh_lo = mh_lo;
2679 *post_shift_ptr = post_shift;
2680 *lgup_ptr = lgup;
2681 if (n < HOST_BITS_PER_WIDE_INT)
2683 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2684 *multiplier_ptr = mhigh_lo & mask;
2685 return mhigh_lo >= mask;
2687 else
2689 *multiplier_ptr = mhigh_lo;
2690 return mhigh_hi;
2694 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2695 congruent to 1 (mod 2**N). */
2697 static unsigned HOST_WIDE_INT
2698 invert_mod2n (x, n)
2699 unsigned HOST_WIDE_INT x;
2700 int n;
2702 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2704 /* The algorithm notes that the choice y = x satisfies
2705 x*y == 1 mod 2^3, since x is assumed odd.
2706 Each iteration doubles the number of bits of significance in y. */
2708 unsigned HOST_WIDE_INT mask;
2709 unsigned HOST_WIDE_INT y = x;
2710 int nbit = 3;
2712 mask = (n == HOST_BITS_PER_WIDE_INT
2713 ? ~(unsigned HOST_WIDE_INT) 0
2714 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2716 while (nbit < n)
2718 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2719 nbit *= 2;
2721 return y;
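/* Worked example (editorial): invert_mod2n (3, 8) starts with y = 3
   (3*3 == 1 mod 2^3); the first iteration gives y = 3*(2 - 9) & 0xff = 235
   (3*235 == 1 mod 2^6), the second y = 235*(2 - 3*235) & 0xff = 171, and
   indeed 3*171 = 513 == 1 (mod 2^8).  */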
2724 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2725 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2726 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2727 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
2728 become signed.
2730 The result is put in TARGET if that is convenient.
2732 MODE is the mode of operation. */
2734 rtx
2735 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2736 enum machine_mode mode;
2737 register rtx adj_operand, op0, op1, target;
2738 int unsignedp;
2740 rtx tem;
2741 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2743 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2744 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2745 NULL_RTX, 0);
2746 tem = expand_and (tem, op1, NULL_RTX);
2747 adj_operand
2748 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2749 adj_operand);
2751 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2752 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2753 NULL_RTX, 0);
2754 tem = expand_and (tem, op0, NULL_RTX);
2755 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2756 target);
2758 return target;
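/* The identity behind the code above (editorial note): if x_u is the
   unsigned reading of OP0 and x_s the signed one, x_u = x_s + 2^N * (x_s < 0),
   so the unsigned high part exceeds the signed high part by
   (OP0 < 0 ? OP1 : 0) + (OP1 < 0 ? OP0 : 0), modulo 2^N.  Each arithmetic
   right shift by N-1 yields all ones exactly when its operand is negative,
   and the AND turns that into the required conditional addend.  */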
2761 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2762 in TARGET if that is convenient, and return where the result is. If the
2763 operation can not be performed, 0 is returned.
2765 MODE is the mode of operation and result.
2767 UNSIGNEDP nonzero means unsigned multiply.
2769 MAX_COST is the total allowed cost for the expanded RTL. */
2771 static rtx
2772 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2773 enum machine_mode mode;
2774 register rtx op0, target;
2775 unsigned HOST_WIDE_INT cnst1;
2776 int unsignedp;
2777 int max_cost;
2779 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2780 optab mul_highpart_optab;
2781 optab moptab;
2782 rtx tem;
2783 int size = GET_MODE_BITSIZE (mode);
2784 rtx op1, wide_op1;
2786 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2787 if (size > HOST_BITS_PER_WIDE_INT)
2788 abort ();
2790 op1 = GEN_INT (cnst1);
2792 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2793 wide_op1 = op1;
2794 else
2795 wide_op1
2796 = immed_double_const (cnst1,
2797 (unsignedp
2798 ? (HOST_WIDE_INT) 0
2799 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2800 wider_mode);
2802 /* expand_mult handles constant multiplication of word_mode
2803 or narrower. It does a poor job for large modes. */
2804 if (size < BITS_PER_WORD
2805 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2807 /* We have to do this, since expand_binop doesn't do conversion for
2808 multiply. Maybe change expand_binop to handle widening multiply? */
2809 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2811 /* We know that this can't have signed overflow, so pretend this is
2812 an unsigned multiply. */
2813 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
2814 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2815 build_int_2 (size, 0), NULL_RTX, 1);
2816 return convert_modes (mode, wider_mode, tem, unsignedp);
2819 if (target == 0)
2820 target = gen_reg_rtx (mode);
2822 /* Firstly, try using a multiplication insn that only generates the needed
2823 high part of the product, and in the sign flavor of unsignedp. */
2824 if (mul_highpart_cost[(int) mode] < max_cost)
2826 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2827 target = expand_binop (mode, mul_highpart_optab,
2828 op0, op1, target, unsignedp, OPTAB_DIRECT);
2829 if (target)
2830 return target;
2833 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2834 Need to adjust the result after the multiplication. */
2835 if (size - 1 < BITS_PER_WORD
2836 && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
2837 < max_cost))
2839 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2840 target = expand_binop (mode, mul_highpart_optab,
2841 op0, op1, target, unsignedp, OPTAB_DIRECT);
2842 if (target)
2843 /* We used the wrong signedness. Adjust the result. */
2844 return expand_mult_highpart_adjust (mode, target, op0,
2845 op1, target, unsignedp);
2848 /* Try widening multiplication. */
2849 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2850 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2851 && mul_widen_cost[(int) wider_mode] < max_cost)
2853 op1 = force_reg (mode, op1);
2854 goto try;
2857 /* Try widening the mode and performing a non-widening multiplication. */
2858 moptab = smul_optab;
2859 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2860 && size - 1 < BITS_PER_WORD
2861 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2863 op1 = wide_op1;
2864 goto try;
2867 /* Try widening multiplication of opposite signedness, and adjust. */
2868 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2869 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2870 && size - 1 < BITS_PER_WORD
2871 && (mul_widen_cost[(int) wider_mode]
2872 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2874 rtx regop1 = force_reg (mode, op1);
2875 tem = expand_binop (wider_mode, moptab, op0, regop1,
2876 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2877 if (tem != 0)
2879 /* Extract the high half of the just generated product. */
2880 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2881 build_int_2 (size, 0), NULL_RTX, 1);
2882 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2883 /* We used the wrong signedness. Adjust the result. */
2884 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2885 target, unsignedp);
2889 return 0;
2891 try:
2892 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2893 tem = expand_binop (wider_mode, moptab, op0, op1,
2894 NULL_RTX, unsignedp, OPTAB_WIDEN);
2895 if (tem == 0)
2896 return 0;
2898 /* Extract the high half of the just generated product. */
2899 if (mode == word_mode)
2901 return gen_highpart (mode, tem);
2903 else
2905 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2906 build_int_2 (size, 0), NULL_RTX, 1);
2907 return convert_modes (mode, wider_mode, tem, unsignedp);
2911 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2912 if that is convenient, and returning where the result is.
2913 You may request either the quotient or the remainder as the result;
2914 specify REM_FLAG nonzero to get the remainder.
2916 CODE is the expression code for which kind of division this is;
2917 it controls how rounding is done. MODE is the machine mode to use.
2918 UNSIGNEDP nonzero means do unsigned division. */
2920 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2921 and then correct it by or'ing in missing high bits
2922 if result of ANDI is nonzero.
2923 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2924 This could optimize to a bfexts instruction.
2925 But C doesn't use these operations, so their optimizations are
2926 left for later. */
2927 /* ??? For modulo, we don't actually need the highpart of the first product,
2928 the low part will do nicely. And for small divisors, the second multiply
2929 can also be a low-part only multiply or even be completely left out.
2930 E.g. to calculate the remainder of a division by 3 with a 32 bit
2931 multiply, multiply with 0x55555556 and extract the upper two bits;
2932 the result is exact for inputs up to 0x1fffffff.
2933 The input range can be reduced by using cross-sum rules.
2934 For odd divisors >= 3, the following table gives right shift counts
2935 so that if a number is shifted by an integer multiple of the given
2936 amount, the remainder stays the same:
2937 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2938 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2939 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2940 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2941 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2943 Cross-sum rules for even numbers can be derived by leaving as many bits
2944 to the right alone as the divisor has zeros to the right.
2945 E.g. if x is an unsigned 32 bit number:
2946 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
2947 */
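/* A compilable sketch of the mod-3 trick above (editorial; assumes a
   32-bit unsigned int with wrap-around multiply): the remainder shows up
   in the top two bits of the low half of x * 0x55555556.  */
#if 0
static unsigned int
rem3_sketch (x)
     unsigned int x;			/* exact for x <= 0x1fffffff */
{
  return (x * 0x55555556) >> 30;
}
#endif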
2949 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
2951 rtx
2952 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2953 int rem_flag;
2954 enum tree_code code;
2955 enum machine_mode mode;
2956 register rtx op0, op1, target;
2957 int unsignedp;
2959 enum machine_mode compute_mode;
2960 register rtx tquotient;
2961 rtx quotient = 0, remainder = 0;
2962 rtx last;
2963 int size;
2964 rtx insn, set;
2965 optab optab1, optab2;
2966 int op1_is_constant, op1_is_pow2;
2967 int max_cost, extra_cost;
2968 static HOST_WIDE_INT last_div_const = 0;
2970 op1_is_constant = GET_CODE (op1) == CONST_INT;
2971 op1_is_pow2 = (op1_is_constant
2972 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2973 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2975 /*
2976 This is the structure of expand_divmod:
2978 First comes code to fix up the operands so we can perform the operations
2979 correctly and efficiently.
2981 Second comes a switch statement with code specific for each rounding mode.
2982 For some special operands this code emits all RTL for the desired
2983 operation, for other cases, it generates only a quotient and stores it in
2984 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2985 to indicate that it has not done anything.
2987 Last comes code that finishes the operation. If QUOTIENT is set and
2988 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2989 QUOTIENT is not set, it is computed using trunc rounding.
2991 We try to generate special code for division and remainder when OP1 is a
2992 constant. If |OP1| = 2**n we can use shifts and some other fast
2993 operations. For other values of OP1, we compute a carefully selected
2994 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2995 by m.
2997 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2998 half of the product. Different strategies for generating the product are
2999 implemented in expand_mult_highpart.
3001 If what we actually want is the remainder, we generate that by another
3002 by-constant multiplication and a subtraction. */
3004 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3005 code below will malfunction if we are, so check here and handle
3006 the special case if so. */
3007 if (op1 == const1_rtx)
3008 return rem_flag ? const0_rtx : op0;
3010 /* When dividing by -1, we could get an overflow.
3011 negv_optab can handle overflows. */
3012 if (! unsignedp && op1 == constm1_rtx)
3014 if (rem_flag)
3015 return const0_rtx;
3016 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3017 ? negv_optab : neg_optab, op0, target, 0);
3020 if (target
3021 /* Don't use the function value register as a target
3022 since we have to read it as well as write it,
3023 and function-inlining gets confused by this. */
3024 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3025 /* Don't clobber an operand while doing a multi-step calculation. */
3026 || ((rem_flag || op1_is_constant)
3027 && (reg_mentioned_p (target, op0)
3028 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3029 || reg_mentioned_p (target, op1)
3030 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
3031 target = 0;
3033 /* Get the mode in which to perform this computation. Normally it will
3034 be MODE, but sometimes we can't do the desired operation in MODE.
3035 If so, pick a wider mode in which we can do the operation. Convert
3036 to that mode at the start to avoid repeated conversions.
3038 First see what operations we need. These depend on the expression
3039 we are evaluating. (We assume that divxx3 insns exist under the
3040 same conditions as modxx3 insns, and that these insns don't normally
3041 fail. If these assumptions are not correct, we may generate less
3042 efficient code in some cases.)
3044 Then see if we find a mode in which we can open-code that operation
3045 (either a division, modulus, or shift). Finally, check for the smallest
3046 mode for which we can do the operation with a library call. */
3048 /* We might want to refine this now that we have division-by-constant
3049 optimization. Since expand_mult_highpart tries so many variants, it is
3050 not straightforward to generalize this. Maybe we should make an array
3051 of possible modes in init_expmed? Save this for GCC 2.7. */
3053 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
3054 : (unsignedp ? udiv_optab : sdiv_optab));
3055 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
3057 for (compute_mode = mode; compute_mode != VOIDmode;
3058 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3059 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3060 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3061 break;
3063 if (compute_mode == VOIDmode)
3064 for (compute_mode = mode; compute_mode != VOIDmode;
3065 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3066 if (optab1->handlers[(int) compute_mode].libfunc
3067 || optab2->handlers[(int) compute_mode].libfunc)
3068 break;
3070 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3071 in expand_binop. */
3072 if (compute_mode == VOIDmode)
3073 compute_mode = mode;
3075 if (target && GET_MODE (target) == compute_mode)
3076 tquotient = target;
3077 else
3078 tquotient = gen_reg_rtx (compute_mode);
3080 size = GET_MODE_BITSIZE (compute_mode);
3081 #if 0
3082 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3083 (mode), and thereby get better code when OP1 is a constant. Do that
3084 later. It will require going over all usages of SIZE below. */
3085 size = GET_MODE_BITSIZE (mode);
3086 #endif
3088 /* Only deduct something for a REM if the last divide done was
3089 for a different constant. Then set the constant of the last
3090 divide. */
3091 max_cost = div_cost[(int) compute_mode]
3092 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3093 && INTVAL (op1) == last_div_const)
3094 ? mul_cost[(int) compute_mode] + add_cost : 0);
3096 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3098 /* Now convert to the best mode to use. */
3099 if (compute_mode != mode)
3101 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3102 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3104 /* convert_modes may have placed op1 into a register, so we
3105 must recompute the following. */
3106 op1_is_constant = GET_CODE (op1) == CONST_INT;
3107 op1_is_pow2 = (op1_is_constant
3108 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3109 || (! unsignedp
3110 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
3113 /* If one of the operands is a volatile MEM, copy it into a register. */
3115 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3116 op0 = force_reg (compute_mode, op0);
3117 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3118 op1 = force_reg (compute_mode, op1);
3120 /* If we need the remainder or if OP1 is constant, we need to
3121 put OP0 in a register in case it has any queued subexpressions. */
3122 if (rem_flag || op1_is_constant)
3123 op0 = force_reg (compute_mode, op0);
3125 last = get_last_insn ();
3127 /* Promote floor rounding to trunc rounding for unsigned operations. */
3128 if (unsignedp)
3130 if (code == FLOOR_DIV_EXPR)
3131 code = TRUNC_DIV_EXPR;
3132 if (code == FLOOR_MOD_EXPR)
3133 code = TRUNC_MOD_EXPR;
3134 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3135 code = TRUNC_DIV_EXPR;
3138 if (op1 != const0_rtx)
3139 switch (code)
3141 case TRUNC_MOD_EXPR:
3142 case TRUNC_DIV_EXPR:
3143 if (op1_is_constant)
3145 if (unsignedp)
3147 unsigned HOST_WIDE_INT mh, ml;
3148 int pre_shift, post_shift;
3149 int dummy;
3150 unsigned HOST_WIDE_INT d = INTVAL (op1);
3152 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3154 pre_shift = floor_log2 (d);
3155 if (rem_flag)
3157 remainder
3158 = expand_binop (compute_mode, and_optab, op0,
3159 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3160 remainder, 1,
3161 OPTAB_LIB_WIDEN);
3162 if (remainder)
3163 return gen_lowpart (mode, remainder);
3165 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3166 build_int_2 (pre_shift, 0),
3167 tquotient, 1);
3169 else if (size <= HOST_BITS_PER_WIDE_INT)
3171 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3173 /* Most significant bit of divisor is set; emit an scc
3174 insn. */
3175 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3176 compute_mode, 1, 1);
3177 if (quotient == 0)
3178 goto fail1;
3180 else
3182 /* Find a suitable multiplier and right shift count
3183 instead of multiplying with D. */
3185 mh = choose_multiplier (d, size, size,
3186 &ml, &post_shift, &dummy);
3188 /* If the suggested multiplier is more than SIZE bits,
3189 we can do better for even divisors, using an
3190 initial right shift. */
3191 if (mh != 0 && (d & 1) == 0)
3193 pre_shift = floor_log2 (d & -d);
3194 mh = choose_multiplier (d >> pre_shift, size,
3195 size - pre_shift,
3196 &ml, &post_shift, &dummy);
3197 if (mh)
3198 abort ();
3200 else
3201 pre_shift = 0;
3203 if (mh != 0)
3205 rtx t1, t2, t3, t4;
3207 if (post_shift - 1 >= BITS_PER_WORD)
3208 goto fail1;
3210 extra_cost = (shift_cost[post_shift - 1]
3211 + shift_cost[1] + 2 * add_cost);
3212 t1 = expand_mult_highpart (compute_mode, op0, ml,
3213 NULL_RTX, 1,
3214 max_cost - extra_cost);
3215 if (t1 == 0)
3216 goto fail1;
3217 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3218 op0, t1),
3219 NULL_RTX);
3220 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3221 build_int_2 (1, 0), NULL_RTX, 1);
3222 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3223 t1, t3),
3224 NULL_RTX);
3225 quotient
3226 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3227 build_int_2 (post_shift - 1, 0),
3228 tquotient, 1);
3230 else
3232 rtx t1, t2;
3234 if (pre_shift >= BITS_PER_WORD
3235 || post_shift >= BITS_PER_WORD)
3236 goto fail1;
3238 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3239 build_int_2 (pre_shift, 0),
3240 NULL_RTX, 1);
3241 extra_cost = (shift_cost[pre_shift]
3242 + shift_cost[post_shift]);
3243 t2 = expand_mult_highpart (compute_mode, t1, ml,
3244 NULL_RTX, 1,
3245 max_cost - extra_cost);
3246 if (t2 == 0)
3247 goto fail1;
3248 quotient
3249 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3250 build_int_2 (post_shift, 0),
3251 tquotient, 1);
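/* Editorial trace of the mh != 0 branch above for d = 7:
   choose_multiplier yields ml = 0x24924925 and post_shift = 3, so the
   emitted rtl computes t1 = MULUH (op0, ml) and then
   q = (((op0 - t1) >> 1) + t1) >> 2; e.g. op0 = 100 gives t1 = 14,
   (86 >> 1) + 14 = 57, and 57 >> 2 = 14 = 100/7.  */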
3255 else /* Too wide mode to use tricky code */
3256 break;
3258 insn = get_last_insn ();
3259 if (insn != last
3260 && (set = single_set (insn)) != 0
3261 && SET_DEST (set) == quotient)
3262 set_unique_reg_note (insn,
3263 REG_EQUAL,
3264 gen_rtx_UDIV (compute_mode, op0, op1));
3266 else /* TRUNC_DIV, signed */
3268 unsigned HOST_WIDE_INT ml;
3269 int lgup, post_shift;
3270 HOST_WIDE_INT d = INTVAL (op1);
3271 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3273 /* n rem d = n rem -d */
3274 if (rem_flag && d < 0)
3276 d = abs_d;
3277 op1 = GEN_INT (abs_d);
3280 if (d == 1)
3281 quotient = op0;
3282 else if (d == -1)
3283 quotient = expand_unop (compute_mode, neg_optab, op0,
3284 tquotient, 0);
3285 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3287 /* This case is not handled correctly below. */
3288 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3289 compute_mode, 1, 1);
3290 if (quotient == 0)
3291 goto fail1;
3293 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3294 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
3295 ;
3296 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3298 lgup = floor_log2 (abs_d);
3299 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3301 rtx label = gen_label_rtx ();
3302 rtx t1;
3304 t1 = copy_to_mode_reg (compute_mode, op0);
3305 do_cmp_and_jump (t1, const0_rtx, GE,
3306 compute_mode, label);
3307 expand_inc (t1, GEN_INT (abs_d - 1));
3308 emit_label (label);
3309 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3310 build_int_2 (lgup, 0),
3311 tquotient, 0);
3313 else
3315 rtx t1, t2, t3;
3316 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3317 build_int_2 (size - 1, 0),
3318 NULL_RTX, 0);
3319 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3320 build_int_2 (size - lgup, 0),
3321 NULL_RTX, 1);
3322 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3323 op0, t2),
3324 NULL_RTX);
3325 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3326 build_int_2 (lgup, 0),
3327 tquotient, 0);
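/* Editorial check of the branch-free sequence above for d = 4 and
   op0 = -7 on a 32-bit target: t1 = -7 >> 31 = -1, t2 = (unsigned) t1
   >> 30 = 3, t3 = -7 + 3 = -4, and -4 >> 2 = -1, matching C's truncating
   division.  */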
3330 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3331 the quotient. */
3332 if (d < 0)
3334 insn = get_last_insn ();
3335 if (insn != last
3336 && (set = single_set (insn)) != 0
3337 && SET_DEST (set) == quotient
3338 && abs_d < ((unsigned HOST_WIDE_INT) 1
3339 << (HOST_BITS_PER_WIDE_INT - 1)))
3340 set_unique_reg_note (insn,
3341 REG_EQUAL,
3342 gen_rtx_DIV (compute_mode,
3343 op0,
3344 GEN_INT (abs_d)));
3346 quotient = expand_unop (compute_mode, neg_optab,
3347 quotient, quotient, 0);
3350 else if (size <= HOST_BITS_PER_WIDE_INT)
3352 choose_multiplier (abs_d, size, size - 1,
3353 &ml, &post_shift, &lgup);
3354 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3356 rtx t1, t2, t3;
3358 if (post_shift >= BITS_PER_WORD
3359 || size - 1 >= BITS_PER_WORD)
3360 goto fail1;
3362 extra_cost = (shift_cost[post_shift]
3363 + shift_cost[size - 1] + add_cost);
3364 t1 = expand_mult_highpart (compute_mode, op0, ml,
3365 NULL_RTX, 0,
3366 max_cost - extra_cost);
3367 if (t1 == 0)
3368 goto fail1;
3369 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3370 build_int_2 (post_shift, 0), NULL_RTX, 0);
3371 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3372 build_int_2 (size - 1, 0), NULL_RTX, 0);
3373 if (d < 0)
3374 quotient
3375 = force_operand (gen_rtx_MINUS (compute_mode,
3376 t3, t2),
3377 tquotient);
3378 else
3379 quotient
3380 = force_operand (gen_rtx_MINUS (compute_mode,
3381 t2, t3),
3382 tquotient);
3384 else
3386 rtx t1, t2, t3, t4;
3388 if (post_shift >= BITS_PER_WORD
3389 || size - 1 >= BITS_PER_WORD)
3390 goto fail1;
3392 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3393 extra_cost = (shift_cost[post_shift]
3394 + shift_cost[size - 1] + 2 * add_cost);
3395 t1 = expand_mult_highpart (compute_mode, op0, ml,
3396 NULL_RTX, 0,
3397 max_cost - extra_cost);
3398 if (t1 == 0)
3399 goto fail1;
3400 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3401 t1, op0),
3402 NULL_RTX);
3403 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3404 build_int_2 (post_shift, 0),
3405 NULL_RTX, 0);
3406 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3407 build_int_2 (size - 1, 0),
3408 NULL_RTX, 0);
3409 if (d < 0)
3410 quotient
3411 = force_operand (gen_rtx_MINUS (compute_mode,
3412 t4, t3),
3413 tquotient);
3414 else
3415 quotient
3416 = force_operand (gen_rtx_MINUS (compute_mode,
3417 t3, t4),
3418 tquotient);
3421 else /* Too wide mode to use tricky code */
3422 break;
3424 insn = get_last_insn ();
3425 if (insn != last
3426 && (set = single_set (insn)) != 0
3427 && SET_DEST (set) == quotient)
3428 set_unique_reg_note (insn,
3429 REG_EQUAL,
3430 gen_rtx_DIV (compute_mode, op0, op1));
3432 break;
3434 fail1:
3435 delete_insns_since (last);
3436 break;
3438 case FLOOR_DIV_EXPR:
3439 case FLOOR_MOD_EXPR:
3440 /* We will come here only for signed operations. */
3441 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3443 unsigned HOST_WIDE_INT mh, ml;
3444 int pre_shift, lgup, post_shift;
3445 HOST_WIDE_INT d = INTVAL (op1);
3447 if (d > 0)
3449 /* We could just as easily deal with negative constants here,
3450 but it does not seem worth the trouble for GCC 2.6. */
3451 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3453 pre_shift = floor_log2 (d);
3454 if (rem_flag)
3456 remainder = expand_binop (compute_mode, and_optab, op0,
3457 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3458 remainder, 0, OPTAB_LIB_WIDEN);
3459 if (remainder)
3460 return gen_lowpart (mode, remainder);
3462 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3463 build_int_2 (pre_shift, 0),
3464 tquotient, 0);
3466 else
3468 rtx t1, t2, t3, t4;
3470 mh = choose_multiplier (d, size, size - 1,
3471 &ml, &post_shift, &lgup);
3472 if (mh)
3473 abort ();
3475 if (post_shift < BITS_PER_WORD
3476 && size - 1 < BITS_PER_WORD)
3478 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3479 build_int_2 (size - 1, 0),
3480 NULL_RTX, 0);
3481 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3482 NULL_RTX, 0, OPTAB_WIDEN);
3483 extra_cost = (shift_cost[post_shift]
3484 + shift_cost[size - 1] + 2 * add_cost);
3485 t3 = expand_mult_highpart (compute_mode, t2, ml,
3486 NULL_RTX, 1,
3487 max_cost - extra_cost);
3488 if (t3 != 0)
3490 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3491 build_int_2 (post_shift, 0),
3492 NULL_RTX, 1);
3493 quotient = expand_binop (compute_mode, xor_optab,
3494 t4, t1, tquotient, 0,
3495 OPTAB_WIDEN);
3500 else
3502 rtx nsign, t1, t2, t3, t4;
3503 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3504 op0, constm1_rtx), NULL_RTX);
3505 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3506 0, OPTAB_WIDEN);
3507 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3508 build_int_2 (size - 1, 0), NULL_RTX, 0);
3509 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3510 NULL_RTX);
3511 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3512 NULL_RTX, 0);
3513 if (t4)
3515 rtx t5;
3516 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3517 NULL_RTX, 0);
3518 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3519 t4, t5),
3520 tquotient);
3525 if (quotient != 0)
3526 break;
3527 delete_insns_since (last);
3529 /* Try using an instruction that produces both the quotient and
3530 remainder, using truncation. We can easily compensate the quotient
3531 or remainder to get floor rounding, once we have the remainder.
3532 Notice that we compute also the final remainder value here,
3533 and return the result right away. */
3534 if (target == 0 || GET_MODE (target) != compute_mode)
3535 target = gen_reg_rtx (compute_mode);
3537 if (rem_flag)
3539 remainder
3540 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3541 quotient = gen_reg_rtx (compute_mode);
3543 else
3545 quotient
3546 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3547 remainder = gen_reg_rtx (compute_mode);
3550 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3551 quotient, remainder, 0))
3553 /* This could be computed with a branch-less sequence.
3554 Save that for later. */
3555 rtx tem;
3556 rtx label = gen_label_rtx ();
3557 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3558 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3559 NULL_RTX, 0, OPTAB_WIDEN);
3560 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3561 expand_dec (quotient, const1_rtx);
3562 expand_inc (remainder, op1);
3563 emit_label (label);
3564 return gen_lowpart (mode, rem_flag ? remainder : quotient);
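/* Editorial example of the adjustment above: op0 = -7 and op1 = 2 give a
   truncating quotient of -3 with remainder -1; the signs differ and the
   remainder is nonzero, so the quotient becomes -4 and the remainder
   -1 + 2 = 1, the floor result.  */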
3567 /* No luck with division elimination or divmod. Have to do it
3568 by conditionally adjusting op0 *and* the result. */
3570 rtx label1, label2, label3, label4, label5;
3571 rtx adjusted_op0;
3572 rtx tem;
3574 quotient = gen_reg_rtx (compute_mode);
3575 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3576 label1 = gen_label_rtx ();
3577 label2 = gen_label_rtx ();
3578 label3 = gen_label_rtx ();
3579 label4 = gen_label_rtx ();
3580 label5 = gen_label_rtx ();
3581 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3582 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3583 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3584 quotient, 0, OPTAB_LIB_WIDEN);
3585 if (tem != quotient)
3586 emit_move_insn (quotient, tem);
3587 emit_jump_insn (gen_jump (label5));
3588 emit_barrier ();
3589 emit_label (label1);
3590 expand_inc (adjusted_op0, const1_rtx);
3591 emit_jump_insn (gen_jump (label4));
3592 emit_barrier ();
3593 emit_label (label2);
3594 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3595 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3596 quotient, 0, OPTAB_LIB_WIDEN);
3597 if (tem != quotient)
3598 emit_move_insn (quotient, tem);
3599 emit_jump_insn (gen_jump (label5));
3600 emit_barrier ();
3601 emit_label (label3);
3602 expand_dec (adjusted_op0, const1_rtx);
3603 emit_label (label4);
3604 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3605 quotient, 0, OPTAB_LIB_WIDEN);
3606 if (tem != quotient)
3607 emit_move_insn (quotient, tem);
3608 expand_dec (quotient, const1_rtx);
3609 emit_label (label5);
3611 break;
3613 case CEIL_DIV_EXPR:
3614 case CEIL_MOD_EXPR:
3615 if (unsignedp)
3617 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3619 rtx t1, t2, t3;
3620 unsigned HOST_WIDE_INT d = INTVAL (op1);
3621 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3622 build_int_2 (floor_log2 (d), 0),
3623 tquotient, 1);
3624 t2 = expand_binop (compute_mode, and_optab, op0,
3625 GEN_INT (d - 1),
3626 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3627 t3 = gen_reg_rtx (compute_mode);
3628 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3629 compute_mode, 1, 1);
3630 if (t3 == 0)
3632 rtx lab;
3633 lab = gen_label_rtx ();
3634 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3635 expand_inc (t1, const1_rtx);
3636 emit_label (lab);
3637 quotient = t1;
3639 else
3640 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3641 t1, t3),
3642 tquotient);
3643 break;
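/* Worked example (editorial, not part of the original file): for d = 8 and
op0 = 21, t1 = 21 >> 3 = 2 and t2 = 21 & 7 = 5, so emit_store_flag yields
t3 = (5 != 0) = 1 and the quotient is 2 + 1 = 3 = ceil (21 / 8). When op0
is an exact multiple, e.g. 16, t2 is 0 and no increment happens. */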
3646 /* Try using an instruction that produces both the quotient and
3647 remainder, using truncation. We can easily compensate the
3648 quotient or remainder to get ceiling rounding, once we have the
3649 remainder. Notice that we also compute the final remainder
3650 value here, and return the result right away. */
3651 if (target == 0 || GET_MODE (target) != compute_mode)
3652 target = gen_reg_rtx (compute_mode);
3654 if (rem_flag)
3656 remainder = (GET_CODE (target) == REG
3657 ? target : gen_reg_rtx (compute_mode));
3658 quotient = gen_reg_rtx (compute_mode);
3660 else
3662 quotient = (GET_CODE (target) == REG
3663 ? target : gen_reg_rtx (compute_mode));
3664 remainder = gen_reg_rtx (compute_mode);
3667 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3668 remainder, 1))
3670 /* This could be computed with a branch-less sequence.
3671 Save that for later. */
3672 rtx label = gen_label_rtx ();
3673 do_cmp_and_jump (remainder, const0_rtx, EQ,
3674 compute_mode, label);
3675 expand_inc (quotient, const1_rtx);
3676 expand_dec (remainder, op1);
3677 emit_label (label);
3678 return gen_lowpart (mode, rem_flag ? remainder : quotient);
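/* Worked example (editorial, not part of the original file): for op0 = 7,
op1 = 3 the truncating divmod gives q = 2, r = 1; since r != 0 the code
above adjusts to q = 3, r = 1 - 3 = -2, i.e. ceil (7 / 3) = 3 with
7 == 3 * 3 - 2. */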
3681 /* No luck with division elimination or divmod. Have to do it
3682 by conditionally adjusting op0 *and* the result. */
3684 rtx label1, label2;
3685 rtx adjusted_op0, tem;
3687 quotient = gen_reg_rtx (compute_mode);
3688 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3689 label1 = gen_label_rtx ();
3690 label2 = gen_label_rtx ();
3691 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3692 compute_mode, label1);
3693 emit_move_insn (quotient, const0_rtx);
3694 emit_jump_insn (gen_jump (label2));
3695 emit_barrier ();
3696 emit_label (label1);
3697 expand_dec (adjusted_op0, const1_rtx);
3698 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3699 quotient, 1, OPTAB_LIB_WIDEN);
3700 if (tem != quotient)
3701 emit_move_insn (quotient, tem);
3702 expand_inc (quotient, const1_rtx);
3703 emit_label (label2);
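/* Illustrative note (editorial, not part of the original file): for
op0 != 0 this computes (op0 - 1) / op1 + 1, which equals ceil (op0 / op1)
for unsigned operands: e.g. (9 - 1) / 4 + 1 = 3 and (8 - 1) / 4 + 1 = 2.
The op0 == 0 case is handled separately because the decrement would wrap
around. */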
3706 else /* signed */
3708 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3709 && INTVAL (op1) >= 0)
3711 /* This is extremely similar to the code for the unsigned case
3712 above. For 2.7 we should merge these variants, but for
3713 2.6.1 I don't want to touch the code for unsigned since that
3714 gets used in C. The signed case will only be used by other
3715 languages (Ada). */
3717 rtx t1, t2, t3;
3718 unsigned HOST_WIDE_INT d = INTVAL (op1);
3719 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3720 build_int_2 (floor_log2 (d), 0),
3721 tquotient, 0);
3722 t2 = expand_binop (compute_mode, and_optab, op0,
3723 GEN_INT (d - 1),
3724 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3725 t3 = gen_reg_rtx (compute_mode);
3726 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3727 compute_mode, 1, 1);
3728 if (t3 == 0)
3730 rtx lab;
3731 lab = gen_label_rtx ();
3732 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3733 expand_inc (t1, const1_rtx);
3734 emit_label (lab);
3735 quotient = t1;
3737 else
3738 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3739 t1, t3),
3740 tquotient);
3741 break;
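/* Worked example (editorial, not part of the original file): for d = 4 and
op0 = -7, the arithmetic shift gives t1 = -7 >> 2 = -2 (the floor) and
t2 = -7 & 3 = 1, so the quotient becomes -2 + 1 = -1 = ceil (-7 / 4). For
op0 = 9, t1 = 2 and t2 = 1, giving 3 = ceil (9 / 4). */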
3744 /* Try using an instruction that produces both the quotient and
3745 remainder, using truncation. We can easily compensate the
3746 quotient or remainder to get ceiling rounding, once we have the
3747 remainder. Notice that we also compute the final remainder
3748 value here, and return the result right away. */
3749 if (target == 0 || GET_MODE (target) != compute_mode)
3750 target = gen_reg_rtx (compute_mode);
3751 if (rem_flag)
3753 remainder = (GET_CODE (target) == REG
3754 ? target : gen_reg_rtx (compute_mode));
3755 quotient = gen_reg_rtx (compute_mode);
3757 else
3759 quotient = (GET_CODE (target) == REG
3760 ? target : gen_reg_rtx (compute_mode));
3761 remainder = gen_reg_rtx (compute_mode);
3764 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3765 remainder, 0))
3767 /* This could be computed with a branch-less sequence.
3768 Save that for later. */
3769 rtx tem;
3770 rtx label = gen_label_rtx ();
3771 do_cmp_and_jump (remainder, const0_rtx, EQ,
3772 compute_mode, label);
3773 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3774 NULL_RTX, 0, OPTAB_WIDEN);
3775 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3776 expand_inc (quotient, const1_rtx);
3777 expand_dec (remainder, op1);
3778 emit_label (label);
3779 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3782 /* No luck with division elimination or divmod. Have to do it
3783 by conditionally adjusting op0 *and* the result. */
3785 rtx label1, label2, label3, label4, label5;
3786 rtx adjusted_op0;
3787 rtx tem;
3789 quotient = gen_reg_rtx (compute_mode);
3790 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3791 label1 = gen_label_rtx ();
3792 label2 = gen_label_rtx ();
3793 label3 = gen_label_rtx ();
3794 label4 = gen_label_rtx ();
3795 label5 = gen_label_rtx ();
3796 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3797 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3798 compute_mode, label1);
3799 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3800 quotient, 0, OPTAB_LIB_WIDEN);
3801 if (tem != quotient)
3802 emit_move_insn (quotient, tem);
3803 emit_jump_insn (gen_jump (label5));
3804 emit_barrier ();
3805 emit_label (label1);
3806 expand_dec (adjusted_op0, const1_rtx);
3807 emit_jump_insn (gen_jump (label4));
3808 emit_barrier ();
3809 emit_label (label2);
3810 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3811 compute_mode, label3);
3812 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3813 quotient, 0, OPTAB_LIB_WIDEN);
3814 if (tem != quotient)
3815 emit_move_insn (quotient, tem);
3816 emit_jump_insn (gen_jump (label5));
3817 emit_barrier ();
3818 emit_label (label3);
3819 expand_inc (adjusted_op0, const1_rtx);
3820 emit_label (label4);
3821 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3822 quotient, 0, OPTAB_LIB_WIDEN);
3823 if (tem != quotient)
3824 emit_move_insn (quotient, tem);
3825 expand_inc (quotient, const1_rtx);
3826 emit_label (label5);
3829 break;
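/* Illustrative note (editorial, not part of the original file): the label
maze above implements: when op0 and op1 have the same sign, replace op0 by
op0 - 1 (both positive) or op0 + 1 (both negative) and add 1 after the
truncating division, e.g. ceil (-9 / -2) = (-9 + 1) / -2 + 1 = 5; when the
signs differ, truncation already rounds up, e.g.
ceil (-7 / 2) = -7 / 2 = -3. */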
3831 case EXACT_DIV_EXPR:
3832 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3834 HOST_WIDE_INT d = INTVAL (op1);
3835 unsigned HOST_WIDE_INT ml;
3836 int pre_shift;
3837 rtx t1;
3839 pre_shift = floor_log2 (d & -d);
3840 ml = invert_mod2n (d >> pre_shift, size);
3841 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3842 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
3843 quotient = expand_mult (compute_mode, t1, GEN_INT (ml), NULL_RTX,
3844 0);
3846 insn = get_last_insn ();
3847 set_unique_reg_note (insn,
3848 REG_EQUAL,
3849 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3850 compute_mode,
3851 op0, op1));
3853 break;
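/* Illustrative sketch (editorial, not part of the original file): when op0
is known to be a multiple of op1, division reduces to a shift by the
power-of-two part followed by a multiply by the inverse of the odd part
modulo 2^size. Assuming 32-bit unsigned arithmetic, exact division by
10 = 2 * 5 would look like:

unsigned exact_div10 (unsigned x)   // x must be a multiple of 10
{
x >>= 1;                  // pre_shift = floor_log2 (10 & -10) = 1
return x * 0xCCCCCCCDu;   // 0xCCCCCCCD * 5 == 1 (mod 2^32)
}

e.g. exact_div10 (30): 30 >> 1 = 15, and 15 * 0xCCCCCCCD == 3 (mod 2^32). */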
3855 case ROUND_DIV_EXPR:
3856 case ROUND_MOD_EXPR:
3857 if (unsignedp)
3859 rtx tem;
3860 rtx label;
3861 label = gen_label_rtx ();
3862 quotient = gen_reg_rtx (compute_mode);
3863 remainder = gen_reg_rtx (compute_mode);
3864 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3866 rtx tem;
3867 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3868 quotient, 1, OPTAB_LIB_WIDEN);
3869 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3870 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3871 remainder, 1, OPTAB_LIB_WIDEN);
3873 tem = plus_constant (op1, -1);
3874 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3875 build_int_2 (1, 0), NULL_RTX, 1);
3876 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3877 expand_inc (quotient, const1_rtx);
3878 expand_dec (remainder, op1);
3879 emit_label (label);
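/* Worked example (editorial, not part of the original file): the threshold
(op1 - 1) >> 1 makes exact halves round up. For op0 = 8, op1 = 3: q = 2,
r = 2 > 1, so q becomes 3 and r becomes -1 (8 / 3 ~ 2.67 rounds to 3).
For op0 = 7: r = 1 <= 1, so q stays 2 (7 / 3 ~ 2.33 rounds to 2). */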
3881 else
3883 rtx abs_rem, abs_op1, tem, mask;
3884 rtx label;
3885 label = gen_label_rtx ();
3886 quotient = gen_reg_rtx (compute_mode);
3887 remainder = gen_reg_rtx (compute_mode);
3888 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3890 rtx tem;
3891 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3892 quotient, 0, OPTAB_LIB_WIDEN);
3893 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3894 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3895 remainder, 0, OPTAB_LIB_WIDEN);
3897 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
3898 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
3899 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3900 build_int_2 (1, 0), NULL_RTX, 1);
3901 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3902 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3903 NULL_RTX, 0, OPTAB_WIDEN);
3904 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3905 build_int_2 (size - 1, 0), NULL_RTX, 0);
3906 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3907 NULL_RTX, 0, OPTAB_WIDEN);
3908 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3909 NULL_RTX, 0, OPTAB_WIDEN);
3910 expand_inc (quotient, tem);
3911 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3912 NULL_RTX, 0, OPTAB_WIDEN);
3913 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3914 NULL_RTX, 0, OPTAB_WIDEN);
3915 expand_dec (remainder, tem);
3916 emit_label (label);
3918 return gen_lowpart (mode, rem_flag ? remainder : quotient);
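/* Illustrative note (editorial, not part of the original file): MASK is 0
when op0 and op1 have the same sign and -1 otherwise, so (mask ^ 1) - mask
evaluates to +1 or -1, and (mask ^ op1) - mask to op1 or -op1, adjusting
quotient and remainder in the right direction without a branch. E.g.
op0 = -8, op1 = 3: q = -2, r = -2, and 2 * |r| >= |op1|, so q += -1 giving
-3 and r -= -3 giving 1; indeed -8 == -3 * 3 + 1. */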
3920 default:
3921 abort ();
3924 if (quotient == 0)
3926 if (target && GET_MODE (target) != compute_mode)
3927 target = 0;
3929 if (rem_flag)
3931 /* Try to produce the remainder without producing the quotient.
3932 If we seem to have a divmod pattern that does not require widening,
3933 don't try widening here. We should really have a WIDEN argument
3934 to expand_twoval_binop, since what we'd really like to do here is
3935 1) try a mod insn in compute_mode
3936 2) try a divmod insn in compute_mode
3937 3) try a div insn in compute_mode and multiply-subtract to get
3938 remainder
3939 4) try the same things with widening allowed. */
3940 remainder
3941 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3942 op0, op1, target,
3943 unsignedp,
3944 ((optab2->handlers[(int) compute_mode].insn_code
3945 != CODE_FOR_nothing)
3946 ? OPTAB_DIRECT : OPTAB_WIDEN));
3947 if (remainder == 0)
3949 /* No luck there. Can we do remainder and divide at once
3950 without a library call? */
3951 remainder = gen_reg_rtx (compute_mode);
3952 if (! expand_twoval_binop ((unsignedp
3953 ? udivmod_optab
3954 : sdivmod_optab),
3955 op0, op1,
3956 NULL_RTX, remainder, unsignedp))
3957 remainder = 0;
3960 if (remainder)
3961 return gen_lowpart (mode, remainder);
3964 /* Produce the quotient. Try a quotient insn, but not a library call.
3965 If we have a divmod in this mode, use it in preference to widening
3966 the div (for this test we assume it will not fail). Note that optab2
3967 is set to whichever of the two optabs the call below will use. */
3968 quotient
3969 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3970 op0, op1, rem_flag ? NULL_RTX : target,
3971 unsignedp,
3972 ((optab2->handlers[(int) compute_mode].insn_code
3973 != CODE_FOR_nothing)
3974 ? OPTAB_DIRECT : OPTAB_WIDEN));
3976 if (quotient == 0)
3978 /* No luck there. Try a quotient-and-remainder insn,
3979 keeping the quotient alone. */
3980 quotient = gen_reg_rtx (compute_mode);
3981 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3982 op0, op1,
3983 quotient, NULL_RTX, unsignedp))
3985 quotient = 0;
3986 if (! rem_flag)
3987 /* Still no luck. If we are not computing the remainder,
3988 use a library call for the quotient. */
3989 quotient = sign_expand_binop (compute_mode,
3990 udiv_optab, sdiv_optab,
3991 op0, op1, target,
3992 unsignedp, OPTAB_LIB_WIDEN);
3997 if (rem_flag)
3999 if (target && GET_MODE (target) != compute_mode)
4000 target = 0;
4002 if (quotient == 0)
4003 /* No divide instruction either. Use library for remainder. */
4004 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4005 op0, op1, target,
4006 unsignedp, OPTAB_LIB_WIDEN);
4007 else
4009 /* We divided. Now finish doing X - Y * (X / Y). */
4010 remainder = expand_mult (compute_mode, quotient, op1,
4011 NULL_RTX, unsignedp);
4012 remainder = expand_binop (compute_mode, sub_optab, op0,
4013 remainder, target, unsignedp,
4014 OPTAB_LIB_WIDEN);
4018 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4021 /* Return a tree node with data type TYPE, describing the value of X.
4022 Usually this is an RTL_EXPR, if there is no obvious better choice.
4023 X may be an expression; however, we only support those expressions
4024 generated by loop.c. */
4026 tree
4027 make_tree (type, x)
4028 tree type;
4029 rtx x;
4031 tree t;
4033 switch (GET_CODE (x))
4035 case CONST_INT:
4036 t = build_int_2 (INTVAL (x),
4037 (TREE_UNSIGNED (type)
4038 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
4039 || INTVAL (x) >= 0 ? 0 : -1);
4040 TREE_TYPE (t) = type;
4041 return t;
4043 case CONST_DOUBLE:
4044 if (GET_MODE (x) == VOIDmode)
4046 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4047 TREE_TYPE (t) = type;
4049 else
4051 REAL_VALUE_TYPE d;
4053 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4054 t = build_real (type, d);
4057 return t;
4059 case PLUS:
4060 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4061 make_tree (type, XEXP (x, 1))));
4063 case MINUS:
4064 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4065 make_tree (type, XEXP (x, 1))));
4067 case NEG:
4068 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4070 case MULT:
4071 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4072 make_tree (type, XEXP (x, 1))));
4074 case ASHIFT:
4075 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4076 make_tree (type, XEXP (x, 1))));
4078 case LSHIFTRT:
4079 return fold (convert (type,
4080 build (RSHIFT_EXPR, unsigned_type (type),
4081 make_tree (unsigned_type (type),
4082 XEXP (x, 0)),
4083 make_tree (type, XEXP (x, 1)))));
4085 case ASHIFTRT:
4086 return fold (convert (type,
4087 build (RSHIFT_EXPR, signed_type (type),
4088 make_tree (signed_type (type), XEXP (x, 0)),
4089 make_tree (type, XEXP (x, 1)))));
4091 case DIV:
4092 if (TREE_CODE (type) != REAL_TYPE)
4093 t = signed_type (type);
4094 else
4095 t = type;
4097 return fold (convert (type,
4098 build (TRUNC_DIV_EXPR, t,
4099 make_tree (t, XEXP (x, 0)),
4100 make_tree (t, XEXP (x, 1)))));
4101 case UDIV:
4102 t = unsigned_type (type);
4103 return fold (convert (type,
4104 build (TRUNC_DIV_EXPR, t,
4105 make_tree (t, XEXP (x, 0)),
4106 make_tree (t, XEXP (x, 1)))));
4107 default:
4108 t = make_node (RTL_EXPR);
4109 TREE_TYPE (t) = type;
4111 #ifdef POINTERS_EXTEND_UNSIGNED
4112 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4113 ptr_mode. So convert. */
4114 if (POINTER_TYPE_P (type) && GET_MODE (x) != TYPE_MODE (type))
4115 x = convert_memory_address (TYPE_MODE (type), x);
4116 #endif
4118 RTL_EXPR_RTL (t) = x;
4119 /* There are no insns to be output
4120 when this rtl_expr is used. */
4121 RTL_EXPR_SEQUENCE (t) = 0;
4122 return t;
4126 /* Return an rtx representing the value of X * MULT + ADD.
4127 TARGET is a suggestion for where to store the result (an rtx).
4128 MODE is the machine mode for the computation.
4129 X and MULT must have mode MODE. ADD may have a different mode.
4130 So can X (defaults to same as MODE).
4131 UNSIGNEDP is non-zero to do unsigned multiplication.
4132 This may emit insns. */
4134 rtx
4135 expand_mult_add (x, target, mult, add, mode, unsignedp)
4136 rtx x, target, mult, add;
4137 enum machine_mode mode;
4138 int unsignedp;
4140 tree type = type_for_mode (mode, unsignedp);
4141 tree add_type = (GET_MODE (add) == VOIDmode
4142 ? type : type_for_mode (GET_MODE (add), unsignedp));
4143 tree result = fold (build (PLUS_EXPR, type,
4144 fold (build (MULT_EXPR, type,
4145 make_tree (type, x),
4146 make_tree (type, mult))),
4147 make_tree (add_type, add)));
4149 return expand_expr (result, target, VOIDmode, 0);
4152 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4153 and returning TARGET.
4155 If TARGET is 0, a pseudo-register or constant is returned. */
4157 rtx
4158 expand_and (op0, op1, target)
4159 rtx op0, op1, target;
4161 enum machine_mode mode = VOIDmode;
4162 rtx tem;
4164 if (GET_MODE (op0) != VOIDmode)
4165 mode = GET_MODE (op0);
4166 else if (GET_MODE (op1) != VOIDmode)
4167 mode = GET_MODE (op1);
4169 if (mode != VOIDmode)
4170 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4171 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
4172 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
4173 else
4174 abort ();
4176 if (target == 0)
4177 target = tem;
4178 else if (tem != target)
4179 emit_move_insn (target, tem);
4180 return target;
4183 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4184 and storing in TARGET. Normally return TARGET.
4185 Return 0 if that cannot be done.
4187 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4188 it is VOIDmode, they cannot both be CONST_INT.
4190 UNSIGNEDP is for the case where we have to widen the operands
4191 to perform the operation. It says to use zero-extension.
4193 NORMALIZEP is 1 if we should convert the result to be either zero
4194 or one. NORMALIZEP is -1 if we should convert the result to be
4195 either zero or -1. If NORMALIZEP is zero, the result will be left
4196 "raw" out of the scc insn. */
4198 rtx
4199 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4200 rtx target;
4201 enum rtx_code code;
4202 rtx op0, op1;
4203 enum machine_mode mode;
4204 int unsignedp;
4205 int normalizep;
4207 rtx subtarget;
4208 enum insn_code icode;
4209 enum machine_mode compare_mode;
4210 enum machine_mode target_mode = GET_MODE (target);
4211 rtx tem;
4212 rtx last = get_last_insn ();
4213 rtx pattern, comparison;
4215 if (unsignedp)
4216 code = unsigned_condition (code);
4218 /* If one operand is constant, make it the second one. Only do this
4219 if the other operand is not constant as well. */
4221 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
4222 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
4224 tem = op0;
4225 op0 = op1;
4226 op1 = tem;
4227 code = swap_condition (code);
4230 if (mode == VOIDmode)
4231 mode = GET_MODE (op0);
4233 /* For some comparisons with 1 and -1, we can convert this to
4234 comparisons with zero. This will often produce more opportunities for
4235 store-flag insns. */
4237 switch (code)
4239 case LT:
4240 if (op1 == const1_rtx)
4241 op1 = const0_rtx, code = LE;
4242 break;
4243 case LE:
4244 if (op1 == constm1_rtx)
4245 op1 = const0_rtx, code = LT;
4246 break;
4247 case GE:
4248 if (op1 == const1_rtx)
4249 op1 = const0_rtx, code = GT;
4250 break;
4251 case GT:
4252 if (op1 == constm1_rtx)
4253 op1 = const0_rtx, code = GE;
4254 break;
4255 case GEU:
4256 if (op1 == const1_rtx)
4257 op1 = const0_rtx, code = NE;
4258 break;
4259 case LTU:
4260 if (op1 == const1_rtx)
4261 op1 = const0_rtx, code = EQ;
4262 break;
4263 default:
4264 break;
4267 /* If we are comparing a double-word integer with zero, we can convert
4268 the comparison into one involving a single word. */
4269 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4270 && GET_MODE_CLASS (mode) == MODE_INT
4271 && op1 == const0_rtx)
4273 if (code == EQ || code == NE)
4275 /* Do a logical OR of the two words and compare the result. */
4276 rtx op0h = gen_highpart (word_mode, op0);
4277 rtx op0l = gen_lowpart (word_mode, op0);
4278 rtx op0both = expand_binop (word_mode, ior_optab, op0h, op0l,
4279 NULL_RTX, unsignedp, OPTAB_DIRECT);
4280 if (op0both != 0)
4281 return emit_store_flag (target, code, op0both, op1, word_mode,
4282 unsignedp, normalizep);
4284 else if (code == LT || code == GE)
4285 /* If testing the sign bit, can just test on high word. */
4286 return emit_store_flag (target, code, gen_highpart (word_mode, op0),
4287 op1, word_mode, unsignedp, normalizep);
4290 /* From now on, we won't change CODE, so set ICODE now. */
4291 icode = setcc_gen_code[(int) code];
4293 /* If this is A < 0 or A >= 0, we can do this by taking the ones
4294 complement of A (for GE) and shifting the sign bit to the low bit. */
4295 if (op1 == const0_rtx && (code == LT || code == GE)
4296 && GET_MODE_CLASS (mode) == MODE_INT
4297 && (normalizep || STORE_FLAG_VALUE == 1
4298 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4299 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4300 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4302 subtarget = target;
4304 /* If the result is to be wider than OP0, it is best to convert it
4305 first. If it is to be narrower, it is *incorrect* to convert it
4306 first. */
4307 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4309 op0 = protect_from_queue (op0, 0);
4310 op0 = convert_modes (target_mode, mode, op0, 0);
4311 mode = target_mode;
4314 if (target_mode != mode)
4315 subtarget = 0;
4317 if (code == GE)
4318 op0 = expand_unop (mode, one_cmpl_optab, op0,
4319 ((STORE_FLAG_VALUE == 1 || normalizep)
4320 ? 0 : subtarget), 0);
4322 if (STORE_FLAG_VALUE == 1 || normalizep)
4323 /* If we are supposed to produce a 0/1 value, we want to do
4324 a logical shift from the sign bit to the low-order bit; for
4325 a -1/0 value, we do an arithmetic shift. */
4326 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4327 size_int (GET_MODE_BITSIZE (mode) - 1),
4328 subtarget, normalizep != -1);
4330 if (mode != target_mode)
4331 op0 = convert_modes (target_mode, mode, op0, 0);
4333 return op0;
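/* Worked example (editorial, not part of the original file): in an 8-bit
mode, A < 0 for A = -5 (0xfb) is 0xfb >> 7 = 1 with a logical shift;
A >= 0 first complements, ~0xfb = 0x04, then 0x04 >> 7 = 0. For a 0/-1
result the shift is arithmetic instead, smearing the sign bit across the
word. */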
4336 if (icode != CODE_FOR_nothing)
4338 insn_operand_predicate_fn pred;
4340 /* We think we may be able to do this with a scc insn. Emit the
4341 comparison and then the scc insn.
4343 compare_from_rtx may call emit_queue, whose insns would be deleted below
4344 if the scc insn fails. So call it ourselves before setting LAST.
4345 Likewise for do_pending_stack_adjust. */
4347 emit_queue ();
4348 do_pending_stack_adjust ();
4349 last = get_last_insn ();
4351 comparison
4352 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4353 if (GET_CODE (comparison) == CONST_INT)
4354 return (comparison == const0_rtx ? const0_rtx
4355 : normalizep == 1 ? const1_rtx
4356 : normalizep == -1 ? constm1_rtx
4357 : const_true_rtx);
4359 /* If the code of COMPARISON doesn't match CODE, something is
4360 wrong; we can no longer be sure that we have the operation.
4361 We could handle this case, but it should not happen. */
4363 if (GET_CODE (comparison) != code)
4364 abort ();
4366 /* Get a reference to the target in the proper mode for this insn. */
4367 compare_mode = insn_data[(int) icode].operand[0].mode;
4368 subtarget = target;
4369 pred = insn_data[(int) icode].operand[0].predicate;
4370 if (preserve_subexpressions_p ()
4371 || ! (*pred) (subtarget, compare_mode))
4372 subtarget = gen_reg_rtx (compare_mode);
4374 pattern = GEN_FCN (icode) (subtarget);
4375 if (pattern)
4377 emit_insn (pattern);
4379 /* If we are converting to a wider mode, first convert to
4380 TARGET_MODE, then normalize. This produces better combining
4381 opportunities on machines that have a SIGN_EXTRACT when we are
4382 testing a single bit. This mostly benefits the 68k.
4384 If STORE_FLAG_VALUE does not have the sign bit set when
4385 interpreted in COMPARE_MODE, we can do this conversion as
4386 unsigned, which is usually more efficient. */
4387 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4389 convert_move (target, subtarget,
4390 (GET_MODE_BITSIZE (compare_mode)
4391 <= HOST_BITS_PER_WIDE_INT)
4392 && 0 == (STORE_FLAG_VALUE
4393 & ((HOST_WIDE_INT) 1
4394 << (GET_MODE_BITSIZE (compare_mode) -1))));
4395 op0 = target;
4396 compare_mode = target_mode;
4398 else
4399 op0 = subtarget;
4401 /* If we want to keep subexpressions around, don't reuse our
4402 last target. */
4404 if (preserve_subexpressions_p ())
4405 subtarget = 0;
4407 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4408 we don't have to do anything. */
4409 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4411 /* STORE_FLAG_VALUE might be the most negative number, so write
4412 the comparison this way to avoid a compile-time warning. */
4413 else if (- normalizep == STORE_FLAG_VALUE)
4414 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4416 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4417 makes it hard to use a value of just the sign bit due to
4418 ANSI integer constant typing rules. */
4419 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4420 && (STORE_FLAG_VALUE
4421 & ((HOST_WIDE_INT) 1
4422 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4423 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4424 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4425 subtarget, normalizep == 1);
4426 else if (STORE_FLAG_VALUE & 1)
4428 op0 = expand_and (op0, const1_rtx, subtarget);
4429 if (normalizep == -1)
4430 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4432 else
4433 abort ();
4435 /* If we were converting to a smaller mode, do the
4436 conversion now. */
4437 if (target_mode != compare_mode)
4439 convert_move (target, op0, 0);
4440 return target;
4442 else
4443 return op0;
4447 delete_insns_since (last);
4449 /* If expensive optimizations are enabled, use different pseudo registers
4450 for each insn, instead of reusing the same pseudo. This leads to better
4451 CSE, but slows down the compiler, since there are more pseudos. */
4452 subtarget = (!flag_expensive_optimizations
4453 && (target_mode == mode)) ? target : NULL_RTX;
4455 /* If we reached here, we can't do this with a scc insn. However, there
4456 are some comparisons that can be done directly. For example, if
4457 this is an equality comparison of integers, we can try to exclusive-or
4458 (or subtract) the two operands and use a recursive call to try the
4459 comparison with zero. Don't do any of these cases if branches are
4460 very cheap. */
4462 if (BRANCH_COST > 0
4463 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4464 && op1 != const0_rtx)
4466 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4467 OPTAB_WIDEN);
4469 if (tem == 0)
4470 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4471 OPTAB_WIDEN);
4472 if (tem != 0)
4473 tem = emit_store_flag (target, code, tem, const0_rtx,
4474 mode, unsignedp, normalizep);
4475 if (tem == 0)
4476 delete_insns_since (last);
4477 return tem;
4480 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4481 the constant zero. Reject all other comparisons at this point. Only
4482 do LE and GT if branches are expensive since they are expensive on
4483 2-operand machines. */
4485 if (BRANCH_COST == 0
4486 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4487 || (code != EQ && code != NE
4488 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4489 return 0;
4491 /* See what we need to return. We can only return a 1, -1, or the
4492 sign bit. */
4494 if (normalizep == 0)
4496 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4497 normalizep = STORE_FLAG_VALUE;
4499 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4500 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4501 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4503 else
4504 return 0;
4507 /* Try to put the result of the comparison in the sign bit. Assume we can't
4508 do the necessary operation below. */
4510 tem = 0;
4512 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4513 the sign bit set. */
4515 if (code == LE)
4517 /* This is destructive, so SUBTARGET can't be OP0. */
4518 if (rtx_equal_p (subtarget, op0))
4519 subtarget = 0;
4521 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4522 OPTAB_WIDEN);
4523 if (tem)
4524 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4525 OPTAB_WIDEN);
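/* Worked example (editorial, not part of the original file): in an 8-bit
mode, A = 0 gives 0 | 0xff = 0xff (sign set, so 0 <= 0); A = 5 gives
5 | 4 = 5 (sign clear); A = -3 gives 0xfd | 0xfc = 0xfd (sign set). The
wrap-around of A - 1 at A = 0 is what makes the trick cover the equality
case. */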
4528 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4529 number of bits in the mode of OP0, minus one. */
4531 if (code == GT)
4533 if (rtx_equal_p (subtarget, op0))
4534 subtarget = 0;
4536 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4537 size_int (GET_MODE_BITSIZE (mode) - 1),
4538 subtarget, 0);
4539 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4540 OPTAB_WIDEN);
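/* Worked example (editorial, not part of the original file): in an 8-bit
mode, A = 3 gives (3 >> 7) - 3 = -3 (sign set, so 3 > 0); A = 0 gives
0 - 0 = 0 (sign clear); A = -5 gives -1 - (-5) = 4 (sign clear). Even
A = -128 behaves: -1 - (-128) = 127, sign clear. */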
4543 if (code == EQ || code == NE)
4545 /* For EQ or NE, one way to do the comparison is to apply an operation
4546 that converts the operand into a positive number if it is non-zero
4547 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4548 for NE we negate. This puts the result in the sign bit. Then we
4549 normalize with a shift, if needed.
4551 Two operations that can do the above actions are ABS and FFS, so try
4552 them. If that doesn't work, and MODE is smaller than a full word,
4553 we can use zero-extension to the wider mode (an unsigned conversion)
4554 as the operation. */
4556 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4557 that is compensated by the subsequent overflow when subtracting
4558 one / negating. */
4560 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4561 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4562 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4563 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4564 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4566 op0 = protect_from_queue (op0, 0);
4567 tem = convert_modes (word_mode, mode, op0, 1);
4568 mode = word_mode;
4571 if (tem != 0)
4573 if (code == EQ)
4574 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4575 0, OPTAB_WIDEN);
4576 else
4577 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4580 /* If we couldn't do it that way, for NE we can "or" the two's complement
4581 of the value with itself. For EQ, we take the one's complement of
4582 that "or", which is an extra insn, so we only handle EQ if branches
4583 are expensive. */
4585 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4587 if (rtx_equal_p (subtarget, op0))
4588 subtarget = 0;
4590 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4591 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4592 OPTAB_WIDEN);
4594 if (tem && code == EQ)
4595 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
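/* Worked example (editorial, not part of the original file): with the ABS
variant in an 8-bit mode, A = 5 gives abs = 5, and for EQ, 5 - 1 = 4 (sign
clear: not equal to zero), while A = 0 gives 0 - 1 = -1 (sign set). For
A = -128, abs overflows back to -128, but -128 - 1 wraps to 127, sign
clear, which is still the right answer; this is the compensation the
comment above refers to. */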
4599 if (tem && normalizep)
4600 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4601 size_int (GET_MODE_BITSIZE (mode) - 1),
4602 subtarget, normalizep == 1);
4604 if (tem)
4606 if (GET_MODE (tem) != target_mode)
4608 convert_move (target, tem, 0);
4609 tem = target;
4611 else if (!subtarget)
4613 emit_move_insn (target, tem);
4614 tem = target;
4617 else
4618 delete_insns_since (last);
4620 return tem;
4623 /* Like emit_store_flag, but always succeeds. */
4625 rtx
4626 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4627 rtx target;
4628 enum rtx_code code;
4629 rtx op0, op1;
4630 enum machine_mode mode;
4631 int unsignedp;
4632 int normalizep;
4634 rtx tem, label;
4636 /* First see if emit_store_flag can do the job. */
4637 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4638 if (tem != 0)
4639 return tem;
4641 if (normalizep == 0)
4642 normalizep = 1;
4644 /* If this failed, we have to do this with set/compare/jump/set code. */
4646 if (GET_CODE (target) != REG
4647 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4648 target = gen_reg_rtx (GET_MODE (target));
4650 emit_move_insn (target, const1_rtx);
4651 label = gen_label_rtx ();
4652 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, 0,
4653 NULL_RTX, label);
4655 emit_move_insn (target, const0_rtx);
4656 emit_label (label);
4658 return target;
4661 /* Perform a possibly multi-word comparison and conditional jump to LABEL
4662 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4664 The algorithm is based on the code in expr.c:do_jump.
4666 Note that this does not perform a general comparison. Only variants
4667 generated within expmed.c are correctly handled, others abort (but could
4668 be handled if needed). */
4670 static void
4671 do_cmp_and_jump (arg1, arg2, op, mode, label)
4672 rtx arg1, arg2, label;
4673 enum rtx_code op;
4674 enum machine_mode mode;
4676 /* If this mode is an integer too wide to compare properly,
4677 compare word by word. Rely on cse to optimize constant cases. */
4679 if (GET_MODE_CLASS (mode) == MODE_INT
4680 && ! can_compare_p (op, mode, ccp_jump))
4682 rtx label2 = gen_label_rtx ();
4684 switch (op)
4686 case LTU:
4687 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4688 break;
4690 case LEU:
4691 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4692 break;
4694 case LT:
4695 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4696 break;
4698 case GT:
4699 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4700 break;
4702 case GE:
4703 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4704 break;
4706 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4707 those are the only equality operations we do. */
4708 case EQ:
4709 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4710 abort();
4711 do_jump_by_parts_equality_rtx (arg1, label2, label);
4712 break;
4714 case NE:
4715 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4716 abort();
4717 do_jump_by_parts_equality_rtx (arg1, label, label2);
4718 break;
4720 default:
4721 abort();
4724 emit_label (label2);
4726 else
4728 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, 0, label);