/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-flags.h"
#include "insn-codes.h"
#include "insn-config.h"
#include "expr.h"
#include "real.h"
#include "recog.h"
static void store_fixed_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, rtx,
                                           unsigned int));
static void store_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, rtx,
                                           unsigned int));
static rtx extract_fixed_bit_field PARAMS ((enum machine_mode, rtx,
                                            unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT,
                                            rtx, int, unsigned int));
static rtx mask_rtx PARAMS ((enum machine_mode, int,
                             int, int));
static rtx lshift_value PARAMS ((enum machine_mode, rtx,
                                 int, int));
static rtx extract_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT, int,
                                            unsigned int));
static void do_cmp_and_jump PARAMS ((rtx, rtx, enum rtx_code,
                                     enum machine_mode, rtx));
/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed ()
{
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                       gen_rtx_ASHIFT (word_mode, reg,
                                                       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_PLUS (word_mode,
                                            gen_rtx_MULT (word_mode,
                                                          reg, const0_rtx),
                                            reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_MINUS (word_mode,
                                             gen_rtx_MULT (word_mode,
                                                           reg, const0_rtx),
                                             reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
        shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
        shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
        shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[(int) wider_mode]
            = rtx_cost (gen_rtx_MULT (wider_mode,
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                        SET);
          mul_highpart_cost[(int) mode]
            = rtx_cost (gen_rtx_TRUNCATE
                        (mode,
                         gen_rtx_LSHIFTRT (wider_mode,
                                           gen_rtx_MULT (wider_mode,
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg),
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg)),
                                           GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }
    }

  end_sequence ();
}
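
/* Usage note (illustrative): after init_expmed runs, the tables above are
   consulted by direct indexing; e.g. a caller deciding whether to expand a
   multiplication by (1 << m) as a shift would compare shift_cost[m] with
   mul_cost[(int) word_mode].  shift_cost[m] keeps the 32000 sentinel when
   the target recognizes no shift-by-M instruction.  */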
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
   else, we use the mode of operand 3.  */
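
/* Worked example (illustrative; the struct and variable names are
   hypothetical): on a little-endian target, a store to x.b in

       struct s { unsigned int a : 3; unsigned int b : 6; } x;

   typically reaches this function with BITSIZE 6 and BITNUM 3, so with
   STR_RTX a MEM the unit is BITS_PER_UNIT and the code below computes
   offset = 3 / 8 = 0 and bitpos = 3 % 8 = 3.  */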
rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     enum machine_mode fieldmode;
     rtx value;
     unsigned int align;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
#ifdef HAVE_insv
  unsigned HOST_WIDE_INT insv_bitsize;
  enum machine_mode op_mode;

  op_mode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
  if (op_mode == VOIDmode)
    op_mode = word_mode;
  insv_bitsize = GET_MODE_BITSIZE (op_mode);
#endif

  /* It is wrong to have align==0, since every object is aligned at
     least at a bit boundary.  This usually means a bug elsewhere.  */
  if (align == 0)
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += SUBREG_WORD (op0);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  if (bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
          ? (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
             || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, align)
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && align % GET_MODE_BITSIZE (fieldmode) == 0)))
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0))
    {
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
          else
            op0 = change_address (op0, fieldmode,
                                  plus_constant (XEXP (op0, 0), offset));
        }
      emit_move_insn (op0, value);
      return value;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = change_address (op0, imode, NULL_RTX);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
          != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          if (GET_MODE (SUBREG_REG (op0)) == fieldmode
              || GET_MODE_CLASS (fieldmode) == MODE_INT
              || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
            op0 = SUBREG_REG (op0);
          else
            /* Else we've got some float mode source being extracted into
               a different float mode destination -- this combination of
               subregs results in Severe Tire Damage.  */
            abort ();
        }

      emit_insn (GEN_FCN (icode)
                 (gen_rtx_SUBREG (fieldmode, op0, offset), value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);

          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum,
                                                  (GET_MODE (value) == VOIDmode
                                                   ? fieldmode
                                                   : GET_MODE (value))),
                           align, total_size);
        }
      return value;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            {
              /* Since this is a destination (lvalue), we can't copy it to a
                 pseudo.  We can trivially remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may have been
                 added above.  Otherwise, abort.  */
              if (GET_CODE (op0) == SUBREG
                  && (GET_MODE_SIZE (GET_MODE (op0))
                      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
                op0 = SUBREG_REG (op0);
              else
                abort ();
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, offset);
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
        value = copy_to_reg (value);
      value = gen_rtx_SUBREG (word_mode, value, 0);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

#ifdef HAVE_insv
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (insv_bitsize >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > insv_bitsize)))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode;
      int save_volatile_ok = volatile_ok;

      maxmode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
      if (maxmode == VOIDmode)
        maxmode = word_mode;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, align, maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS (bestmode, align)
                  && GET_MODE_BITSIZE (bestmode) > align))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.  */
          unit = GET_MODE_BITSIZE (bestmode);
          /* Compute offset as multiple of this unit, counting in bytes.  */
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = change_address (op0, bestmode,
                                plus_constant (XEXP (op0, 0), offset));

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           align, total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = change_address (xop0, byte_mode,
                               plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  /* Avoid making subreg of a subreg, or of a mem.  */
                  if (GET_CODE (value1) != REG)
                    value1 = copy_to_reg (value1);
                  value1 = gen_rtx_SUBREG (maxmode, value1, 0);
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
        }
    }
  else
    insv_loses:
#endif
  /* Insv is not available; store using shifts and boolean ops.  */
  store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
  return value;
}
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have.

   Note that protect_from_queue has already been done on OP0 and VALUE.  */
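
/* Illustrative sketch (hypothetical names, assuming a 32-bit word and
   little-endian bit numbering): storing a 6-bit VALUE at BITPOS 9 of a
   word W amounts to

       mask = ((1 << 6) - 1) << 9;                  i.e. 0x00007e00
       W = (W & ~mask) | ((VALUE & 0x3f) << 9);

   which is the and/ior sequence emitted below; mask_rtx builds the ~mask
   constant and lshift_value (for constant VALUE) the shifted value.  */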
static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     register rtx value;
     unsigned int struct_align;
{
  register enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  if (! SLOW_UNALIGNED_ACCESS (word_mode, struct_align))
    struct_align = BIGGEST_ALIGNMENT;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos,
                                 value, BITS_PER_WORD);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            struct_align, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0,
                                 bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value, struct_align);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */
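
  /* Example (illustrative): with total_bits 32, bitsize 6 and a
     big-endian bitpos of 9 (counted from the msb), the field occupies
     bits 17..22 counted from the lsb, and the conversion above yields
     bitpos = 32 - 6 - 9 = 17.  */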
  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.
   ALIGN is the known alignment of OP0.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */
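
/* Worked example (illustrative): with 32-bit words, BITSIZE 10 and
   BITPOS 28, the loop below runs twice:
     1) thissize = MIN (10, 32 - 28) = 4 bits stored at bit 28 of word 0;
     2) the remaining 6 bits stored at bit 0 of word 1.
   Each piece of VALUE is cut out with extract_fixed_bit_field and handed
   to store_fixed_bit_field, so no single store crosses a word boundary.  */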
static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     unsigned HOST_WIDE_INT bitsize, bitpos;
     rtx value;
     unsigned int align;
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.

               ??? We have no idea what the alignment of VALUE is, so
               we have to use a guess.  */
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize,
                 total_bits - bitsize + bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1 : GET_MODE_ALIGNMENT (GET_MODE (value))));
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1 : GET_MODE_ALIGNMENT (GET_MODE (value))));
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          word = operand_subword_force (SUBREG_REG (op0),
                                        SUBREG_WORD (op0) + offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
                             thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
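
/* Worked example (illustrative; struct and names hypothetical): reading
   x.b from

       struct s { unsigned int a : 3; unsigned int b : 6; } x;

   on a little-endian target arrives here roughly as
   extract_bit_field (<mem for x>, 6, 3, 1, target, mode, tmode, ...).
   The SUBREG fast path below does not apply (bitpos is 3, not 0, and the
   field is not register-sized), so the extraction falls through to the
   extzv pattern when available, else to extract_fixed_bit_field.  */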
rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
                   target, mode, tmode, align, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     unsigned int align;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
#ifdef HAVE_extv
  unsigned HOST_WIDE_INT extv_bitsize;
  enum machine_mode extv_mode;
#endif
#ifdef HAVE_extzv
  unsigned HOST_WIDE_INT extzv_bitsize;
  enum machine_mode extzv_mode;
#endif

#ifdef HAVE_extv
  extv_mode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
  if (extv_mode == VOIDmode)
    extv_mode = word_mode;
  extv_bitsize = GET_MODE_BITSIZE (extv_mode);
#endif

#ifdef HAVE_extzv
  extzv_mode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
  if (extzv_mode == VOIDmode)
    extzv_mode = word_mode;
  extzv_bitsize = GET_MODE_BITSIZE (extzv_mode);
#endif

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_WORD (op0);

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
        {
          bitpos += inner_size - outer_size;
          if (bitpos > unit)
            {
              offset += (bitpos / unit);
              bitpos %= unit;
            }
        }

      op0 = SUBREG_REG (op0);
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = change_address (op0, imode, NULL_RTX);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (((GET_CODE (op0) != MEM
        && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op0))))
       || (GET_CODE (op0) == MEM
           && (! SLOW_UNALIGNED_ACCESS (mode, align)
               || (offset * BITS_PER_UNIT % bitsize == 0
                   && align % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
           && bitpos % BITS_PER_WORD == 0)
          || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
              /* ??? The big endian test here is wrong.  This is correct
                 if the value is in a register, and if mode_for_size is not
                 the same mode as op0.  This causes us to get unnecessarily
                 inefficient code from the Thumb port when -mbig-endian.  */
              && (BYTES_BIG_ENDIAN
                  ? bitpos + bitsize == BITS_PER_WORD
                  : bitpos == 0))))
    {
      enum machine_mode mode1
        = (VECTOR_MODE_P (tmode) ? mode
           : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));

      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == mode1
                  || GET_MODE_CLASS (mode1) == MODE_INT
                  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (mode1, op0, offset);
          else
            op0 = change_address (op0, mode1,
                                  plus_constant (XEXP (op0, 0), offset));
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode, align, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                {
                  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
                  rtx target_part
                    = operand_subword (target, wordnum, 1, VOIDmode);
                  emit_move_insn (target_part, const0_rtx);
                }
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();  /* Should probably push op0 out to memory and then
                  do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, offset);
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
          && (extzv_bitsize >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > extzv_bitsize)))
        {
          unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode;

          maxmode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
          if (maxmode == VOIDmode)
            maxmode = word_mode;

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum, align, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
                          && GET_MODE_BITSIZE (bestmode) > align))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));

              volatile_ok = save_volatile_ok;
            }
          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1, align);
            }
        }
      else
        extzv_loses:
#endif
      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                        bitpos, target, 1, align);
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
          && (extv_bitsize >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > extv_bitsize)))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode;

          maxmode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
          if (maxmode == VOIDmode)
            maxmode = word_mode;

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum, align, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
                          && GET_MODE_BITSIZE (bestmode) > align))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0, align);
            }
        }
      else
        extv_loses:
#endif
      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                        bitpos, target, 0, align);
    }

  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          if (GET_CODE (target) != REG)
            target = copy_to_reg (target);
          return gen_rtx_SUBREG (tmode, target, 0);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have.  */
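
/* Illustrative sketch of the code this function emits, assuming a
   32-bit word W, BITSIZE 6, and BITPOS 9 already counted from the lsb:

       unsigned field:  result = (W >> 9) & 0x3f;
       signed field:    result = ((int) (W << (32 - 9 - 6))) >> (32 - 6);

   i.e. shift-and-mask in the unsigned branch below, and a left shift
   followed by an arithmetic right shift in the signed branch.  */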
static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                         target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     int unsignedp;
     unsigned int align;
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos,
                                        unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT, align,
                            word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
         Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_2 (bitpos, 0);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && GET_CODE (target) == REG
                           && !REG_FUNCTION_VALUE_P (target)
                           ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#if 0
#ifdef SLOW_ZERO_EXTEND
          /* Always generate an `and' if
             we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
             will combine fruitfully with the zero-extend.  */
          || tmode != mode
#endif
#endif
          )
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
        = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
                       && ! REG_FUNCTION_VALUE_P (target)
                       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                       target, 0);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */
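
/* Worked example (illustrative, assuming a 32-bit HOST_WIDE_INT): for
   BITPOS 3, BITSIZE 4, masklow is (-1 << 3) masked down to bits 0..6,
   i.e. 0x78, and maskhigh is 0; with COMPLEMENT nonzero the result
   represents ~0x78 truncated to MODE, the "clear the field, keep the
   rest" constant used by store_fixed_bit_field.  */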
static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */
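
/* Worked example (illustrative, assuming a 32-bit HOST_WIDE_INT): for
   VALUE 0x2b, BITPOS 8, BITSIZE 6, v is truncated to 0x2b & 0x3f = 0x2b
   and the returned constant has low part 0x2b << 8 = 0x2b00 and high
   part 0.  */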
static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
1751 /* Extract a bit field that is split across two words
1752 and return an RTX for the result.
1754 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1755 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1756 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
1758 ALIGN is the known alignment of OP0. This is also the size of the
1759 memory objects to be used. */
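/* For example, on a little-endian target, a 10-bit field starting at
   bit 28 of a pair of 32-bit words is extracted in two parts: 4 bits
   at position 28 of the first word, then 6 bits at position 0 of the
   second word; the second part is shifted left by 4 and IORed into
   the first.  */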
1761 static rtx
1762 extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
1763 rtx op0;
1764 unsigned HOST_WIDE_INT bitsize, bitpos;
1765 int unsignedp;
1766 unsigned int align;
1768 unsigned int unit;
1769 unsigned int bitsdone = 0;
1770 rtx result = NULL_RTX;
1771 int first = 1;
1773 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
1774 much at a time. */
1775 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1776 unit = BITS_PER_WORD;
1777 else
1778 unit = MIN (align, BITS_PER_WORD);
1780 while (bitsdone < bitsize)
1782 unsigned HOST_WIDE_INT thissize;
1783 rtx part, word;
1784 unsigned HOST_WIDE_INT thispos;
1785 unsigned HOST_WIDE_INT offset;
1787 offset = (bitpos + bitsdone) / unit;
1788 thispos = (bitpos + bitsdone) % unit;
1790 /* THISSIZE must not overrun a word boundary. Otherwise,
1791 extract_fixed_bit_field will call us again, and we will mutually
1792 recurse forever. */
1793 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1794 thissize = MIN (thissize, unit - thispos);
1796 /* If OP0 is a register, then handle OFFSET here.
1798 When handling multiword bitfields, extract_bit_field may pass
1799 down a word_mode SUBREG of a larger REG for a bitfield that actually
1800 crosses a word boundary. Thus, for a SUBREG, we must find
1801 the current word starting from the base register. */
1802 if (GET_CODE (op0) == SUBREG)
1804 word = operand_subword_force (SUBREG_REG (op0),
1805 SUBREG_WORD (op0) + offset,
1806 GET_MODE (SUBREG_REG (op0)));
1807 offset = 0;
1809 else if (GET_CODE (op0) == REG)
1811 word = operand_subword_force (op0, offset, GET_MODE (op0));
1812 offset = 0;
1814 else
1815 word = op0;
1817 /* Extract the parts in bit-counting order,
1818 whose meaning is determined by BYTES_BIG_ENDIAN.
1819 OFFSET is in UNITs, and UNIT is in bits.
1820 extract_fixed_bit_field wants offset in bytes. */
1821 part = extract_fixed_bit_field (word_mode, word,
1822 offset * unit / BITS_PER_UNIT,
1823 thissize, thispos, 0, 1, align);
1824 bitsdone += thissize;
1826 /* Shift this part into place for the result. */
1827 if (BYTES_BIG_ENDIAN)
1829 if (bitsize != bitsdone)
1830 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1831 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1833 else
1835 if (bitsdone != thissize)
1836 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1837 build_int_2 (bitsdone - thissize, 0), 0, 1);
1840 if (first)
1841 result = part;
1842 else
1843 /* Combine the parts with bitwise or. This works
1844 because we extracted each part as an unsigned bit field. */
1845 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1846 OPTAB_LIB_WIDEN);
1848 first = 0;
1851 /* Unsigned bit field: we are done. */
1852 if (unsignedp)
1853 return result;
1854 /* Signed bit field: sign-extend with two arithmetic shifts. */
1855 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1856 build_int_2 (BITS_PER_WORD - bitsize, 0),
1857 NULL_RTX, 0);
1858 return expand_shift (RSHIFT_EXPR, word_mode, result,
1859 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
1862 /* Add INC into TARGET. */
1864 void
1865 expand_inc (target, inc)
1866 rtx target, inc;
1868 rtx value = expand_binop (GET_MODE (target), add_optab,
1869 target, inc,
1870 target, 0, OPTAB_LIB_WIDEN);
1871 if (value != target)
1872 emit_move_insn (target, value);
1875 /* Subtract DEC from TARGET. */
1877 void
1878 expand_dec (target, dec)
1879 rtx target, dec;
1881 rtx value = expand_binop (GET_MODE (target), sub_optab,
1882 target, dec,
1883 target, 0, OPTAB_LIB_WIDEN);
1884 if (value != target)
1885 emit_move_insn (target, value);
1888 /* Output a shift instruction for expression code CODE,
1889 with SHIFTED being the rtx for the value to shift,
1890 and AMOUNT the tree for the amount to shift by.
1891 Store the result in the rtx TARGET, if that is convenient.
1892 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1893 Return the rtx for where the value is. */
1896 expand_shift (code, mode, shifted, amount, target, unsignedp)
1897 enum tree_code code;
1898 register enum machine_mode mode;
1899 rtx shifted;
1900 tree amount;
1901 register rtx target;
1902 int unsignedp;
1904 register rtx op1, temp = 0;
1905 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1906 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1907 int try;
1909 /* Previously, this code detected shift counts computed by NEGATE_EXPR
1910 and shifted in the other direction instead, but that does not work
1911 on all machines. */
1913 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1915 #ifdef SHIFT_COUNT_TRUNCATED
1916 if (SHIFT_COUNT_TRUNCATED)
1918 if (GET_CODE (op1) == CONST_INT
1919 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1920 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1921 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1922 % GET_MODE_BITSIZE (mode));
1923 else if (GET_CODE (op1) == SUBREG
1924 && SUBREG_WORD (op1) == 0)
1925 op1 = SUBREG_REG (op1);
1927 #endif
1929 if (op1 == const0_rtx)
1930 return shifted;
1932 for (try = 0; temp == 0 && try < 3; try++)
1934 enum optab_methods methods;
1936 if (try == 0)
1937 methods = OPTAB_DIRECT;
1938 else if (try == 1)
1939 methods = OPTAB_WIDEN;
1940 else
1941 methods = OPTAB_LIB_WIDEN;
1943 if (rotate)
1945 /* Widening does not work for rotation. */
1946 if (methods == OPTAB_WIDEN)
1947 continue;
1948 else if (methods == OPTAB_LIB_WIDEN)
1950 /* If we have been unable to open-code this by a rotation,
1951 do it as the IOR of two shifts. I.e., to rotate A
1952 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1953 where C is the bitsize of A.
1955 It is theoretically possible that the target machine might
1956 not be able to perform either shift and hence we would
1957 be making two libcalls rather than just the one for the
1958 shift (similarly if IOR could not be done). We will allow
1959 this extremely unlikely lossage to avoid complicating the
1960 code below. */
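/* For example, to rotate a 32-bit A left by a constant 5, this emits
   the equivalent of (A << 5) | ((unsigned) A >> 27).  */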
1962 rtx subtarget = target == shifted ? 0 : target;
1963 rtx temp1;
1964 tree type = TREE_TYPE (amount);
1965 tree new_amount = make_tree (type, op1);
1966 tree other_amount
1967 = fold (build (MINUS_EXPR, type,
1968 convert (type,
1969 build_int_2 (GET_MODE_BITSIZE (mode),
1970 0)),
1971 amount));
1973 shifted = force_reg (mode, shifted);
1975 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1976 mode, shifted, new_amount, subtarget, 1);
1977 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1978 mode, shifted, other_amount, 0, 1);
1979 return expand_binop (mode, ior_optab, temp, temp1, target,
1980 unsignedp, methods);
1983 temp = expand_binop (mode,
1984 left ? rotl_optab : rotr_optab,
1985 shifted, op1, target, unsignedp, methods);
1987 /* If we don't have the rotate, but we are rotating by a constant
1988 that is in range, try a rotate in the opposite direction. */
1990 if (temp == 0 && GET_CODE (op1) == CONST_INT
1991 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
1992 temp = expand_binop (mode,
1993 left ? rotr_optab : rotl_optab,
1994 shifted,
1995 GEN_INT (GET_MODE_BITSIZE (mode)
1996 - INTVAL (op1)),
1997 target, unsignedp, methods);
1999 else if (unsignedp)
2000 temp = expand_binop (mode,
2001 left ? ashl_optab : lshr_optab,
2002 shifted, op1, target, unsignedp, methods);
2004 /* Do arithmetic shifts.
2005 Also, if we are going to widen the operand, we can just as well
2006 use an arithmetic right-shift instead of a logical one. */
2007 if (temp == 0 && ! rotate
2008 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2010 enum optab_methods methods1 = methods;
2012 /* If trying to widen a log shift to an arithmetic shift,
2013 don't accept an arithmetic shift of the same size. */
2014 if (unsignedp)
2015 methods1 = OPTAB_MUST_WIDEN;
2017 /* Arithmetic shift */
2019 temp = expand_binop (mode,
2020 left ? ashl_optab : ashr_optab,
2021 shifted, op1, target, unsignedp, methods1);
2024 /* We used to try extzv here for logical right shifts, but that was
2025 only useful for one machine, the VAX, and caused poor code
2026 generation there for lshrdi3, so the code was deleted and a
2027 define_expand for lshrsi3 was added to vax.md. */
2030 if (temp == 0)
2031 abort ();
2032 return temp;
2035 enum alg_code { alg_zero, alg_m, alg_shift,
2036 alg_add_t_m2, alg_sub_t_m2,
2037 alg_add_factor, alg_sub_factor,
2038 alg_add_t2_m, alg_sub_t2_m,
2039 alg_add, alg_subtract, alg_factor, alg_shiftop };
2041 /* This structure records a sequence of operations.
2042 `ops' is the number of operations recorded.
2043 `cost' is their total cost.
2044 The operations are stored in `op' and the corresponding
2045 logarithms of the integer coefficients in `log'.
2047 These are the operations:
2048 alg_zero total := 0;
2049 alg_m total := multiplicand;
2050 alg_shift total := total * coeff;
2051 alg_add_t_m2 total := total + multiplicand * coeff;
2052 alg_sub_t_m2 total := total - multiplicand * coeff;
2053 alg_add_factor total := total * coeff + total;
2054 alg_sub_factor total := total * coeff - total;
2055 alg_add_t2_m total := total * coeff + multiplicand;
2056 alg_sub_t2_m total := total * coeff - multiplicand;
2058 The first operand must be either alg_zero or alg_m. */
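/* For example, one way to multiply by 45 (= 5 * 9) is:
   alg_m                 total := x;
   alg_add_t2_m (log 2)  total := total * 4 + x;      (= 5 * x)
   alg_add_factor (log 3) total := total * 8 + total;  (= 45 * x)
   i.e. two shift-add sequences instead of a multiply insn.  */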
2060 struct algorithm
2062 short cost;
2063 short ops;
2064 /* The size of the OP and LOG fields are not directly related to the
2065 word size, but the worst-case algorithms will be if we have few
2066 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2067 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2068 in total wordsize operations. */
2069 enum alg_code op[MAX_BITS_PER_WORD];
2070 char log[MAX_BITS_PER_WORD];
2073 static void synth_mult PARAMS ((struct algorithm *,
2074 unsigned HOST_WIDE_INT,
2075 int));
2076 static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
2077 int, int,
2078 unsigned HOST_WIDE_INT *,
2079 int *, int *));
2080 static unsigned HOST_WIDE_INT invert_mod2n PARAMS ((unsigned HOST_WIDE_INT,
2081 int));
2082 /* Compute and return the best algorithm for multiplying by T.
2083 The algorithm must cost less than COST_LIMIT.
2084 If retval.cost >= COST_LIMIT, no algorithm was found and all
2085 other fields of the returned struct are undefined. */
2087 static void
2088 synth_mult (alg_out, t, cost_limit)
2089 struct algorithm *alg_out;
2090 unsigned HOST_WIDE_INT t;
2091 int cost_limit;
2093 int m;
2094 struct algorithm *alg_in, *best_alg;
2095 int cost;
2096 unsigned HOST_WIDE_INT q;
2098 /* Indicate that no algorithm is yet found. If no algorithm
2099 is found, this value will be returned and indicate failure. */
2100 alg_out->cost = cost_limit;
2102 if (cost_limit <= 0)
2103 return;
2105 /* t == 1 can be done in zero cost. */
2106 if (t == 1)
2108 alg_out->ops = 1;
2109 alg_out->cost = 0;
2110 alg_out->op[0] = alg_m;
2111 return;
2114 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2115 fail now. */
2116 if (t == 0)
2118 if (zero_cost >= cost_limit)
2119 return;
2120 else
2122 alg_out->ops = 1;
2123 alg_out->cost = zero_cost;
2124 alg_out->op[0] = alg_zero;
2125 return;
2129 /* We'll be needing a couple extra algorithm structures now. */
2131 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2132 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2134 /* If we have a group of zero bits at the low-order part of T, try
2135 multiplying by the remaining bits and then doing a shift. */
2137 if ((t & 1) == 0)
2139 m = floor_log2 (t & -t); /* m = number of low zero bits */
2140 q = t >> m;
2141 cost = shift_cost[m];
2142 synth_mult (alg_in, q, cost_limit - cost);
2144 cost += alg_in->cost;
2145 if (cost < cost_limit)
2147 struct algorithm *x;
2148 x = alg_in, alg_in = best_alg, best_alg = x;
2149 best_alg->log[best_alg->ops] = m;
2150 best_alg->op[best_alg->ops] = alg_shift;
2151 cost_limit = cost;
2155 /* If we have an odd number, add or subtract one. */
2156 if ((t & 1) != 0)
2158 unsigned HOST_WIDE_INT w;
2160 for (w = 1; (w & t) != 0; w <<= 1)
2162 /* If T was -1, then W will be zero after the loop. This is another
2163 case where T ends with ...111. Handling it by multiplying by (T + 1)
2164 and subtracting 1 produces slightly better code and makes algorithm
2165 selection much faster than treating it like the ...0111 case
2166 below. */
2167 if (w == 0
2168 || (w > 2
2169 /* Reject the case where t is 3.
2170 Thus we prefer addition in that case. */
2171 && t != 3))
2173 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2175 cost = add_cost;
2176 synth_mult (alg_in, t + 1, cost_limit - cost);
2178 cost += alg_in->cost;
2179 if (cost < cost_limit)
2181 struct algorithm *x;
2182 x = alg_in, alg_in = best_alg, best_alg = x;
2183 best_alg->log[best_alg->ops] = 0;
2184 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2185 cost_limit = cost;
2188 else
2190 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2192 cost = add_cost;
2193 synth_mult (alg_in, t - 1, cost_limit - cost);
2195 cost += alg_in->cost;
2196 if (cost < cost_limit)
2198 struct algorithm *x;
2199 x = alg_in, alg_in = best_alg, best_alg = x;
2200 best_alg->log[best_alg->ops] = 0;
2201 best_alg->op[best_alg->ops] = alg_add_t_m2;
2202 cost_limit = cost;
2207 /* Look for factors of t of the form
2208 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2209 If we find such a factor, we can multiply by t using an algorithm that
2210 multiplies by q, shifts the result left by m, and adds or subtracts the unshifted result.
2212 We search for large factors first and loop down, even if large factors
2213 are less probable than small; if we find a large factor we will find a
2214 good sequence quickly, and therefore be able to prune (by decreasing
2215 COST_LIMIT) the search. */
2217 for (m = floor_log2 (t - 1); m >= 2; m--)
2219 unsigned HOST_WIDE_INT d;
2221 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2222 if (t % d == 0 && t > d)
2224 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2225 synth_mult (alg_in, t / d, cost_limit - cost);
2227 cost += alg_in->cost;
2228 if (cost < cost_limit)
2230 struct algorithm *x;
2231 x = alg_in, alg_in = best_alg, best_alg = x;
2232 best_alg->log[best_alg->ops] = m;
2233 best_alg->op[best_alg->ops] = alg_add_factor;
2234 cost_limit = cost;
2236 /* Other factors will have been taken care of in the recursion. */
2237 break;
2240 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2241 if (t % d == 0 && t > d)
2243 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2244 synth_mult (alg_in, t / d, cost_limit - cost);
2246 cost += alg_in->cost;
2247 if (cost < cost_limit)
2249 struct algorithm *x;
2250 x = alg_in, alg_in = best_alg, best_alg = x;
2251 best_alg->log[best_alg->ops] = m;
2252 best_alg->op[best_alg->ops] = alg_sub_factor;
2253 cost_limit = cost;
2255 break;
2259 /* Try shift-and-add (load effective address) instructions,
2260 i.e. do a*3, a*5, a*9. */
2261 if ((t & 1) != 0)
2263 q = t - 1;
2264 q = q & -q;
2265 m = exact_log2 (q);
2266 if (m >= 0)
2268 cost = shiftadd_cost[m];
2269 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2271 cost += alg_in->cost;
2272 if (cost < cost_limit)
2274 struct algorithm *x;
2275 x = alg_in, alg_in = best_alg, best_alg = x;
2276 best_alg->log[best_alg->ops] = m;
2277 best_alg->op[best_alg->ops] = alg_add_t2_m;
2278 cost_limit = cost;
2282 q = t + 1;
2283 q = q & -q;
2284 m = exact_log2 (q);
2285 if (m >= 0)
2287 cost = shiftsub_cost[m];
2288 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2290 cost += alg_in->cost;
2291 if (cost < cost_limit)
2293 struct algorithm *x;
2294 x = alg_in, alg_in = best_alg, best_alg = x;
2295 best_alg->log[best_alg->ops] = m;
2296 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2297 cost_limit = cost;
2302 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2303 we have not found any algorithm. */
2304 if (cost_limit == alg_out->cost)
2305 return;
2307 /* If the sequence we found is too long for `struct algorithm'
2308 to record, make this search fail. */
2309 if (best_alg->ops == MAX_BITS_PER_WORD)
2310 return;
2312 /* Copy the algorithm from temporary space to the space at alg_out.
2313 We avoid using structure assignment because the majority of
2314 best_alg is normally undefined, and this is a critical function. */
2315 alg_out->ops = best_alg->ops + 1;
2316 alg_out->cost = cost_limit;
2317 memcpy (alg_out->op, best_alg->op,
2318 alg_out->ops * sizeof *alg_out->op);
2319 memcpy (alg_out->log, best_alg->log,
2320 alg_out->ops * sizeof *alg_out->log);
2323 /* Perform a multiplication and return an rtx for the result.
2324 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2325 TARGET is a suggestion for where to store the result (an rtx).
2327 We check specially for a constant integer as OP1.
2328 If you want this check for OP0 as well, then before calling
2329 you should swap the two operands if OP0 would be constant. */
2332 expand_mult (mode, op0, op1, target, unsignedp)
2333 enum machine_mode mode;
2334 register rtx op0, op1, target;
2335 int unsignedp;
2337 rtx const_op1 = op1;
2339 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2340 less than or equal in size to `unsigned int' this doesn't matter.
2341 If the mode is larger than `unsigned int', then synth_mult works only
2342 if the constant value exactly fits in an `unsigned int' without any
2343 truncation. This means that multiplying by negative values does
2344 not work; results are off by 2^32 on a 32-bit machine. */
2346 /* If we are multiplying in DImode, it may still be a win
2347 to try to work with shifts and adds. */
2348 if (GET_CODE (op1) == CONST_DOUBLE
2349 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2350 && HOST_BITS_PER_INT >= BITS_PER_WORD
2351 && CONST_DOUBLE_HIGH (op1) == 0)
2352 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2353 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2354 && GET_CODE (op1) == CONST_INT
2355 && INTVAL (op1) < 0)
2356 const_op1 = 0;
2358 /* We used to test optimize here, on the grounds that it's better to
2359 produce a smaller program when -O is not used.
2360 But this causes such a terrible slowdown sometimes
2361 that it seems better to use synth_mult always. */
2363 if (const_op1 && GET_CODE (const_op1) == CONST_INT
2364 && (unsignedp || ! flag_trapv))
2366 struct algorithm alg;
2367 struct algorithm alg2;
2368 HOST_WIDE_INT val = INTVAL (op1);
2369 HOST_WIDE_INT val_so_far;
2370 rtx insn;
2371 int mult_cost;
2372 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2374 /* Try to do the computation three ways: multiply by the negative of OP1
2375 and then negate, do the multiplication directly, or do multiplication
2376 by OP1 - 1. */
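/* For example, for OP1 == -5 the negate variant synthesizes
   5 * x as (x << 2) + x and then negates the result, which is
   usually cheaper than a hardware multiply.  */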
2378 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2379 mult_cost = MIN (12 * add_cost, mult_cost);
2381 synth_mult (&alg, val, mult_cost);
2383 /* This works only if the negated value actually fits in an
2384 `unsigned int'. */
2385 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2387 synth_mult (&alg2, - val,
2388 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2389 if (alg2.cost + negate_cost < alg.cost)
2390 alg = alg2, variant = negate_variant;
2393 /* This proves very useful for division-by-constant. */
2394 synth_mult (&alg2, val - 1,
2395 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2396 if (alg2.cost + add_cost < alg.cost)
2397 alg = alg2, variant = add_variant;
2399 if (alg.cost < mult_cost)
2401 /* We found something cheaper than a multiply insn. */
2402 int opno;
2403 rtx accum, tem;
2404 enum machine_mode nmode;
2406 op0 = protect_from_queue (op0, 0);
2408 /* Avoid referencing memory over and over.
2409 For speed, but also for correctness when mem is volatile. */
2410 if (GET_CODE (op0) == MEM)
2411 op0 = force_reg (mode, op0);
2413 /* ACCUM starts out either as OP0 or as a zero, depending on
2414 the first operation. */
2416 if (alg.op[0] == alg_zero)
2418 accum = copy_to_mode_reg (mode, const0_rtx);
2419 val_so_far = 0;
2421 else if (alg.op[0] == alg_m)
2423 accum = copy_to_mode_reg (mode, op0);
2424 val_so_far = 1;
2426 else
2427 abort ();
2429 for (opno = 1; opno < alg.ops; opno++)
2431 int log = alg.log[opno];
2432 int preserve = preserve_subexpressions_p ();
2433 rtx shift_subtarget = preserve ? 0 : accum;
2434 rtx add_target
2435 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2436 && ! preserve)
2437 ? target : 0;
2438 rtx accum_target = preserve ? 0 : accum;
2440 switch (alg.op[opno])
2442 case alg_shift:
2443 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2444 build_int_2 (log, 0), NULL_RTX, 0);
2445 val_so_far <<= log;
2446 break;
2448 case alg_add_t_m2:
2449 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2450 build_int_2 (log, 0), NULL_RTX, 0);
2451 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2452 add_target
2453 ? add_target : accum_target);
2454 val_so_far += (HOST_WIDE_INT) 1 << log;
2455 break;
2457 case alg_sub_t_m2:
2458 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2459 build_int_2 (log, 0), NULL_RTX, 0);
2460 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2461 add_target
2462 ? add_target : accum_target);
2463 val_so_far -= (HOST_WIDE_INT) 1 << log;
2464 break;
2466 case alg_add_t2_m:
2467 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2468 build_int_2 (log, 0), shift_subtarget,
2469 0);
2470 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2471 add_target
2472 ? add_target : accum_target);
2473 val_so_far = (val_so_far << log) + 1;
2474 break;
2476 case alg_sub_t2_m:
2477 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2478 build_int_2 (log, 0), shift_subtarget,
2479 0);
2480 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2481 add_target
2482 ? add_target : accum_target);
2483 val_so_far = (val_so_far << log) - 1;
2484 break;
2486 case alg_add_factor:
2487 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2488 build_int_2 (log, 0), NULL_RTX, 0);
2489 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2490 add_target
2491 ? add_target : accum_target);
2492 val_so_far += val_so_far << log;
2493 break;
2495 case alg_sub_factor:
2496 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2497 build_int_2 (log, 0), NULL_RTX, 0);
2498 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2499 (add_target ? add_target
2500 : preserve ? 0 : tem));
2501 val_so_far = (val_so_far << log) - val_so_far;
2502 break;
2504 default:
2505 abort ();
2508 /* Write a REG_EQUAL note on the last insn so that we can cse
2509 multiplication sequences. Note that if ACCUM is a SUBREG,
2510 we've set the inner register and must properly indicate
2511 that. */
2513 tem = op0, nmode = mode;
2514 if (GET_CODE (accum) == SUBREG)
2516 nmode = GET_MODE (SUBREG_REG (accum));
2517 tem = gen_lowpart (nmode, op0);
2520 insn = get_last_insn ();
2521 set_unique_reg_note (insn,
2522 REG_EQUAL,
2523 gen_rtx_MULT (nmode, tem,
2524 GEN_INT (val_so_far)));
2527 if (variant == negate_variant)
2529 val_so_far = - val_so_far;
2530 accum = expand_unop (mode, neg_optab, accum, target, 0);
2532 else if (variant == add_variant)
2534 val_so_far = val_so_far + 1;
2535 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2538 if (val != val_so_far)
2539 abort ();
2541 return accum;
2545 /* This used to use umul_optab if unsigned, but for non-widening multiply
2546 there is no difference between signed and unsigned. */
2547 op0 = expand_binop (mode,
2548 ! unsignedp
2549 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2550 ? smulv_optab : smul_optab,
2551 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2552 if (op0 == 0)
2553 abort ();
2554 return op0;
2557 /* Return the smallest n such that 2**n >= X. */
2560 ceil_log2 (x)
2561 unsigned HOST_WIDE_INT x;
2563 return floor_log2 (x - 1) + 1;
2566 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2567 replace division by D, and put the least significant N bits of the result
2568 in *MULTIPLIER_PTR and return the most significant bit.
2570 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT); the
2571 needed precision is in PRECISION (should be <= N).
2573 PRECISION should be as small as possible so this function can choose
2574 the multiplier more freely.
2576 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2577 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2579 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2580 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
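/* For example, for D == 3 with N == PRECISION == 32 this chooses
   the multiplier 0xaaaaaaab with *POST_SHIFT_PTR == 1 and returns 0,
   so x / 3 can be computed as the high word of the 64-bit product
   x * 0xaaaaaaab, shifted right by 1.  */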
2582 static
2583 unsigned HOST_WIDE_INT
2584 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2585 unsigned HOST_WIDE_INT d;
2586 int n;
2587 int precision;
2588 unsigned HOST_WIDE_INT *multiplier_ptr;
2589 int *post_shift_ptr;
2590 int *lgup_ptr;
2592 HOST_WIDE_INT mhigh_hi, mlow_hi;
2593 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2594 int lgup, post_shift;
2595 int pow, pow2;
2596 unsigned HOST_WIDE_INT nl, dummy1;
2597 HOST_WIDE_INT nh, dummy2;
2599 /* lgup = ceil(log2(divisor)); */
2600 lgup = ceil_log2 (d);
2602 if (lgup > n)
2603 abort ();
2605 pow = n + lgup;
2606 pow2 = n + lgup - precision;
2608 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2610 /* We could handle this with some effort, but this case is much better
2611 handled directly with a scc insn, so rely on caller using that. */
2612 abort ();
2615 /* mlow = 2^(N + lgup)/d */
2616 if (pow >= HOST_BITS_PER_WIDE_INT)
2618 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2619 nl = 0;
2621 else
2623 nh = 0;
2624 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2626 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2627 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2629 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2630 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2631 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2632 else
2633 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2634 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2635 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2637 if (mhigh_hi && nh - d >= d)
2638 abort ();
2639 if (mhigh_hi > 1 || mlow_hi > 1)
2640 abort ();
2641 /* assert that mlow < mhigh. */
2642 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2643 abort ();
2645 /* If precision == N, then mlow, mhigh exceed 2^N
2646 (but they do not exceed 2^(N+1)). */
2648 /* Reduce to lowest terms */
2649 for (post_shift = lgup; post_shift > 0; post_shift--)
2651 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2652 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2653 if (ml_lo >= mh_lo)
2654 break;
2656 mlow_hi = 0;
2657 mlow_lo = ml_lo;
2658 mhigh_hi = 0;
2659 mhigh_lo = mh_lo;
2662 *post_shift_ptr = post_shift;
2663 *lgup_ptr = lgup;
2664 if (n < HOST_BITS_PER_WIDE_INT)
2666 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2667 *multiplier_ptr = mhigh_lo & mask;
2668 return mhigh_lo >= mask;
2670 else
2672 *multiplier_ptr = mhigh_lo;
2673 return mhigh_hi;
2677 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2678 congruent to 1 (mod 2**N). */
2680 static unsigned HOST_WIDE_INT
2681 invert_mod2n (x, n)
2682 unsigned HOST_WIDE_INT x;
2683 int n;
2685 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2687 /* The algorithm notes that the choice y = x satisfies
2688 x*y == 1 mod 2^3, since x is assumed odd.
2689 Each iteration doubles the number of bits of significance in y. */
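/* For example, invert_mod2n (7, 8) starts with y = 7 (7 * 7 == 49 == 1
   mod 2**3) and after one iteration reaches y = 183, which satisfies
   7 * 183 == 1281 == 5 * 256 + 1 == 1 (mod 2**8).  */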
2691 unsigned HOST_WIDE_INT mask;
2692 unsigned HOST_WIDE_INT y = x;
2693 int nbit = 3;
2695 mask = (n == HOST_BITS_PER_WIDE_INT
2696 ? ~(unsigned HOST_WIDE_INT) 0
2697 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2699 while (nbit < n)
2701 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2702 nbit *= 2;
2704 return y;
2707 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2708 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2709 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2710 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
2711 become signed.
2713 The result is put in TARGET if that is convenient.
2715 MODE is the mode of operation. */
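/* The adjustment implements the identity
   highpart_s (OP0, OP1) == highpart_u (OP0, OP1)
       - (OP0 < 0 ? OP1 : 0) - (OP1 < 0 ? OP0 : 0)  (mod 2**N),
   since interpreting an N-bit value as signed subtracts 2**N times its
   sign bit. The arithmetic right shifts by N - 1 below turn each sign
   bit into an all-ones mask that is then ANDed with the other operand.  */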
2718 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2719 enum machine_mode mode;
2720 register rtx adj_operand, op0, op1, target;
2721 int unsignedp;
2723 rtx tem;
2724 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2726 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2727 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2728 NULL_RTX, 0);
2729 tem = expand_and (tem, op1, NULL_RTX);
2730 adj_operand
2731 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2732 adj_operand);
2734 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2735 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2736 NULL_RTX, 0);
2737 tem = expand_and (tem, op0, NULL_RTX);
2738 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2739 target);
2741 return target;
2744 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2745 in TARGET if that is convenient, and return where the result is. If the
2746 operation cannot be performed, 0 is returned.
2748 MODE is the mode of operation and result.
2750 UNSIGNEDP nonzero means unsigned multiply.
2752 MAX_COST is the total allowed cost for the expanded RTL. */
2755 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2756 enum machine_mode mode;
2757 register rtx op0, target;
2758 unsigned HOST_WIDE_INT cnst1;
2759 int unsignedp;
2760 int max_cost;
2762 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2763 optab mul_highpart_optab;
2764 optab moptab;
2765 rtx tem;
2766 int size = GET_MODE_BITSIZE (mode);
2767 rtx op1, wide_op1;
2769 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2770 if (size > HOST_BITS_PER_WIDE_INT)
2771 abort ();
2773 op1 = GEN_INT (cnst1);
2775 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2776 wide_op1 = op1;
2777 else
2778 wide_op1
2779 = immed_double_const (cnst1,
2780 (unsignedp
2781 ? (HOST_WIDE_INT) 0
2782 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2783 wider_mode);
2785 /* expand_mult handles constant multiplication of word_mode
2786 or narrower. It does a poor job for large modes. */
2787 if (size < BITS_PER_WORD
2788 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2790 /* We have to do this, since expand_binop doesn't do conversion for
2791 multiply. Maybe change expand_binop to handle widening multiply? */
2792 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2794 /* We know that this can't have signed overflow, so pretend this is
2795 an unsigned multiply. */
2796 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
2797 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2798 build_int_2 (size, 0), NULL_RTX, 1);
2799 return convert_modes (mode, wider_mode, tem, unsignedp);
2802 if (target == 0)
2803 target = gen_reg_rtx (mode);
2805 /* Firstly, try using a multiplication insn that only generates the needed
2806 high part of the product, and in the sign flavor of unsignedp. */
2807 if (mul_highpart_cost[(int) mode] < max_cost)
2809 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2810 target = expand_binop (mode, mul_highpart_optab,
2811 op0, op1, target, unsignedp, OPTAB_DIRECT);
2812 if (target)
2813 return target;
2816 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2817 Need to adjust the result after the multiplication. */
2818 if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost < max_cost)
2820 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2821 target = expand_binop (mode, mul_highpart_optab,
2822 op0, op1, target, unsignedp, OPTAB_DIRECT);
2823 if (target)
2824 /* We used the wrong signedness. Adjust the result. */
2825 return expand_mult_highpart_adjust (mode, target, op0,
2826 op1, target, unsignedp);
2829 /* Try widening multiplication. */
2830 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2831 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2832 && mul_widen_cost[(int) wider_mode] < max_cost)
2834 op1 = force_reg (mode, op1);
2835 goto try;
2838 /* Try widening the mode and perform a non-widening multiplication. */
2839 moptab = smul_optab;
2840 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2841 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2843 op1 = wide_op1;
2844 goto try;
2847 /* Try widening multiplication of opposite signedness, and adjust. */
2848 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2849 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2850 && (mul_widen_cost[(int) wider_mode]
2851 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2853 rtx regop1 = force_reg (mode, op1);
2854 tem = expand_binop (wider_mode, moptab, op0, regop1,
2855 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2856 if (tem != 0)
2858 /* Extract the high half of the just generated product. */
2859 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2860 build_int_2 (size, 0), NULL_RTX, 1);
2861 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2862 /* We used the wrong signedness. Adjust the result. */
2863 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2864 target, unsignedp);
2868 return 0;
2870 try:
2871 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2872 tem = expand_binop (wider_mode, moptab, op0, op1,
2873 NULL_RTX, unsignedp, OPTAB_WIDEN);
2874 if (tem == 0)
2875 return 0;
2877 /* Extract the high half of the just generated product. */
2878 if (mode == word_mode)
2880 return gen_highpart (mode, tem);
2882 else
2884 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2885 build_int_2 (size, 0), NULL_RTX, 1);
2886 return convert_modes (mode, wider_mode, tem, unsignedp);
2890 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2891 if that is convenient, and returning where the result is.
2892 You may request either the quotient or the remainder as the result;
2893 specify REM_FLAG nonzero to get the remainder.
2895 CODE is the expression code for which kind of division this is;
2896 it controls how rounding is done. MODE is the machine mode to use.
2897 UNSIGNEDP nonzero means do unsigned division. */
2899 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2900 and then correct it by or'ing in missing high bits
2901 if result of ANDI is nonzero.
2902 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2903 This could optimize to a bfexts instruction.
2904 But C doesn't use these operations, so their optimizations are
2905 left for later. */
2906 /* ??? For modulo, we don't actually need the highpart of the first product;
2907 the low part will do nicely. And for small divisors, the second multiply
2908 can also be a low-part-only multiply, or even be left out completely.
2909 E.g. to calculate the remainder of a division by 3 with a 32-bit
2910 multiply, multiply by 0x55555556 and extract the upper two bits;
2911 the result is exact for inputs up to 0x1fffffff.
2912 The input range can be reduced by using cross-sum rules.
2913 For odd divisors >= 3, the following table gives right shift counts
2914 so that if a number is shifted by an integer multiple of the given
2915 amount, the remainder stays the same:
2916 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2917 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2918 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2919 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2920 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2922 Cross-sum rules for even numbers can be derived by leaving as many bits
2923 to the right alone as the divisor has zeros to the right.
2924 E.g. if x is an unsigned 32-bit number:
2925 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
2928 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
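/* x & (x - 1) clears the lowest set bit of x, so the result is zero
   exactly when x has at most one bit set, i.e. x is a power of 2 or 0.  */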
2931 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2932 int rem_flag;
2933 enum tree_code code;
2934 enum machine_mode mode;
2935 register rtx op0, op1, target;
2936 int unsignedp;
2938 enum machine_mode compute_mode;
2939 register rtx tquotient;
2940 rtx quotient = 0, remainder = 0;
2941 rtx last;
2942 int size;
2943 rtx insn, set;
2944 optab optab1, optab2;
2945 int op1_is_constant, op1_is_pow2;
2946 int max_cost, extra_cost;
2947 static HOST_WIDE_INT last_div_const = 0;
2949 op1_is_constant = GET_CODE (op1) == CONST_INT;
2950 op1_is_pow2 = (op1_is_constant
2951 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2952 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2955 This is the structure of expand_divmod:
2957 First comes code to fix up the operands so we can perform the operations
2958 correctly and efficiently.
2960 Second comes a switch statement with code specific for each rounding mode.
2961 For some special operands this code emits all RTL for the desired
2962 operation, for other cases, it generates only a quotient and stores it in
2963 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2964 to indicate that it has not done anything.
2966 Last comes code that finishes the operation. If QUOTIENT is set and
2967 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2968 QUOTIENT is not set, it is computed using trunc rounding.
2970 We try to generate special code for division and remainder when OP1 is a
2971 constant. If |OP1| = 2**n we can use shifts and some other fast
2972 operations. For other values of OP1, we compute a carefully selected
2973 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2974 by m.
2976 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2977 half of the product. Different strategies for generating the product are
2978 implemented in expand_mult_highpart.
2980 If what we actually want is the remainder, we generate it with another
2981 by-constant multiplication and a subtraction. */
2983 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2984 code below will malfunction if we are, so check here and handle
2985 the special case if so. */
2986 if (op1 == const1_rtx)
2987 return rem_flag ? const0_rtx : op0;
2989 /* When dividing by -1, we could get an overflow.
2990 negv_optab can handle overflows. */
2991 if (! unsignedp && op1 == constm1_rtx)
2993 if (rem_flag)
2994 return const0_rtx;
2995 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
2996 ? negv_optab : neg_optab, op0, target, 0);
2999 if (target
3000 /* Don't use the function value register as a target
3001 since we have to read it as well as write it,
3002 and function-inlining gets confused by this. */
3003 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3004 /* Don't clobber an operand while doing a multi-step calculation. */
3005 || ((rem_flag || op1_is_constant)
3006 && (reg_mentioned_p (target, op0)
3007 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3008 || reg_mentioned_p (target, op1)
3009 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
3010 target = 0;
3012 /* Get the mode in which to perform this computation. Normally it will
3013 be MODE, but sometimes we can't do the desired operation in MODE.
3014 If so, pick a wider mode in which we can do the operation. Convert
3015 to that mode at the start to avoid repeated conversions.
3017 First see what operations we need. These depend on the expression
3018 we are evaluating. (We assume that divxx3 insns exist under the
3019 same conditions as modxx3 insns, and that these insns don't normally
3020 fail. If these assumptions are not correct, we may generate less
3021 efficient code in some cases.)
3023 Then see if we find a mode in which we can open-code that operation
3024 (either a division, modulus, or shift). Finally, check for the smallest
3025 mode for which we can do the operation with a library call. */
3027 /* We might want to refine this now that we have division-by-constant
3028 optimization. Since expand_mult_highpart tries so many variants, it is
3029 not straightforward to generalize this. Maybe we should make an array
3030 of possible modes in init_expmed? Save this for GCC 2.7. */
3032 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
3033 : (unsignedp ? udiv_optab : sdiv_optab));
3034 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
3036 for (compute_mode = mode; compute_mode != VOIDmode;
3037 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3038 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3039 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3040 break;
3042 if (compute_mode == VOIDmode)
3043 for (compute_mode = mode; compute_mode != VOIDmode;
3044 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3045 if (optab1->handlers[(int) compute_mode].libfunc
3046 || optab2->handlers[(int) compute_mode].libfunc)
3047 break;
3049 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3050 in expand_binop. */
3051 if (compute_mode == VOIDmode)
3052 compute_mode = mode;
3054 if (target && GET_MODE (target) == compute_mode)
3055 tquotient = target;
3056 else
3057 tquotient = gen_reg_rtx (compute_mode);
3059 size = GET_MODE_BITSIZE (compute_mode);
3060 #if 0
3061 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3062 (mode), and thereby get better code when OP1 is a constant. Do that
3063 later. It will require going over all usages of SIZE below. */
3064 size = GET_MODE_BITSIZE (mode);
3065 #endif
3067 /* Only deduct something for a REM if the last divide done was
3068 for a different constant. Then set the constant of the last
3069 divide. */
3070 max_cost = div_cost[(int) compute_mode]
3071 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3072 && INTVAL (op1) == last_div_const)
3073 ? mul_cost[(int) compute_mode] + add_cost : 0);
3075 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3077 /* Now convert to the best mode to use. */
3078 if (compute_mode != mode)
3080 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3081 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3083 /* convert_modes may have placed op1 into a register, so we
3084 must recompute the following. */
3085 op1_is_constant = GET_CODE (op1) == CONST_INT;
3086 op1_is_pow2 = (op1_is_constant
3087 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3088 || (! unsignedp
3089 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3092 /* If one of the operands is a volatile MEM, copy it into a register. */
3094 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3095 op0 = force_reg (compute_mode, op0);
3096 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3097 op1 = force_reg (compute_mode, op1);
3099 /* If we need the remainder or if OP1 is constant, we need to
3100 put OP0 in a register in case it has any queued subexpressions. */
3101 if (rem_flag || op1_is_constant)
3102 op0 = force_reg (compute_mode, op0);
3104 last = get_last_insn ();
3106 /* Promote floor rounding to trunc rounding for unsigned operations. */
3107 if (unsignedp)
3109 if (code == FLOOR_DIV_EXPR)
3110 code = TRUNC_DIV_EXPR;
3111 if (code == FLOOR_MOD_EXPR)
3112 code = TRUNC_MOD_EXPR;
3113 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3114 code = TRUNC_DIV_EXPR;
3117 if (op1 != const0_rtx)
3118 switch (code)
3120 case TRUNC_MOD_EXPR:
3121 case TRUNC_DIV_EXPR:
3122 if (op1_is_constant)
3124 if (unsignedp)
3126 unsigned HOST_WIDE_INT mh, ml;
3127 int pre_shift, post_shift;
3128 int dummy;
3129 unsigned HOST_WIDE_INT d = INTVAL (op1);
3131 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3133 pre_shift = floor_log2 (d);
3134 if (rem_flag)
3136 remainder
3137 = expand_binop (compute_mode, and_optab, op0,
3138 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3139 remainder, 1,
3140 OPTAB_LIB_WIDEN);
3141 if (remainder)
3142 return gen_lowpart (mode, remainder);
3144 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3145 build_int_2 (pre_shift, 0),
3146 tquotient, 1);
3148 else if (size <= HOST_BITS_PER_WIDE_INT)
3150 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3152 /* Most significant bit of divisor is set; emit an scc
3153 insn. */
3154 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3155 compute_mode, 1, 1);
3156 if (quotient == 0)
3157 goto fail1;
3159 else
3161 /* Find a suitable multiplier and right shift count
3162 instead of multiplying with D. */
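/* For example, for d == 7 with 32-bit operands, choose_multiplier
   returns ml == 0x24924925 with mh set and post_shift == 3, and the
   mh != 0 sequence below computes
   t1 = highpart (op0 * 0x24924925); t2 = op0 - t1;
   t4 = t1 + (t2 >> 1); quotient = t4 >> 2.  */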
3164 mh = choose_multiplier (d, size, size,
3165 &ml, &post_shift, &dummy);
3167 /* If the suggested multiplier is more than SIZE bits,
3168 we can do better for even divisors, using an
3169 initial right shift. */
3170 if (mh != 0 && (d & 1) == 0)
3172 pre_shift = floor_log2 (d & -d);
3173 mh = choose_multiplier (d >> pre_shift, size,
3174 size - pre_shift,
3175 &ml, &post_shift, &dummy);
3176 if (mh)
3177 abort ();
3179 else
3180 pre_shift = 0;
3182 if (mh != 0)
3184 rtx t1, t2, t3, t4;
3186 extra_cost = (shift_cost[post_shift - 1]
3187 + shift_cost[1] + 2 * add_cost);
3188 t1 = expand_mult_highpart (compute_mode, op0, ml,
3189 NULL_RTX, 1,
3190 max_cost - extra_cost);
3191 if (t1 == 0)
3192 goto fail1;
3193 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3194 op0, t1),
3195 NULL_RTX);
3196 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3197 build_int_2 (1, 0), NULL_RTX, 1);
3198 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3199 t1, t3),
3200 NULL_RTX);
3201 quotient
3202 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3203 build_int_2 (post_shift - 1, 0),
3204 tquotient, 1);
3206 else
3208 rtx t1, t2;
3210 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3211 build_int_2 (pre_shift, 0),
3212 NULL_RTX, 1);
3213 extra_cost = (shift_cost[pre_shift]
3214 + shift_cost[post_shift]);
3215 t2 = expand_mult_highpart (compute_mode, t1, ml,
3216 NULL_RTX, 1,
3217 max_cost - extra_cost);
3218 if (t2 == 0)
3219 goto fail1;
3220 quotient
3221 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3222 build_int_2 (post_shift, 0),
3223 tquotient, 1);
3227 else /* Mode too wide to use the tricky code */
3228 break;
3230 insn = get_last_insn ();
3231 if (insn != last
3232 && (set = single_set (insn)) != 0
3233 && SET_DEST (set) == quotient)
3234 set_unique_reg_note (insn,
3235 REG_EQUAL,
3236 gen_rtx_UDIV (compute_mode, op0, op1));
3238 else /* TRUNC_DIV, signed */
3240 unsigned HOST_WIDE_INT ml;
3241 int lgup, post_shift;
3242 HOST_WIDE_INT d = INTVAL (op1);
3243 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3245 /* n rem d = n rem -d */
3246 if (rem_flag && d < 0)
3248 d = abs_d;
3249 op1 = GEN_INT (abs_d);
3252 if (d == 1)
3253 quotient = op0;
3254 else if (d == -1)
3255 quotient = expand_unop (compute_mode, neg_optab, op0,
3256 tquotient, 0);
3257 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3259 /* This case is not handled correctly below. */
3260 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3261 compute_mode, 1, 1);
3262 if (quotient == 0)
3263 goto fail1;
3265 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3266 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
3268 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3270 lgup = floor_log2 (abs_d);
3271 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3273 rtx label = gen_label_rtx ();
3274 rtx t1;
3276 t1 = copy_to_mode_reg (compute_mode, op0);
3277 do_cmp_and_jump (t1, const0_rtx, GE,
3278 compute_mode, label);
3279 expand_inc (t1, GEN_INT (abs_d - 1));
3280 emit_label (label);
3281 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3282 build_int_2 (lgup, 0),
3283 tquotient, 0);
3285 else
3287 rtx t1, t2, t3;
3288 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3289 build_int_2 (size - 1, 0),
3290 NULL_RTX, 0);
3291 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3292 build_int_2 (size - lgup, 0),
3293 NULL_RTX, 1);
3294 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3295 op0, t2),
3296 NULL_RTX);
3297 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3298 build_int_2 (lgup, 0),
3299 tquotient, 0);
3302 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3303 the quotient. */
3304 if (d < 0)
3306 insn = get_last_insn ();
3307 if (insn != last
3308 && (set = single_set (insn)) != 0
3309 && SET_DEST (set) == quotient
3310 && abs_d < ((unsigned HOST_WIDE_INT) 1
3311 << (HOST_BITS_PER_WIDE_INT - 1)))
3312 set_unique_reg_note (insn,
3313 REG_EQUAL,
3314 gen_rtx_DIV (compute_mode,
3315 op0,
3316 GEN_INT (abs_d)));
3318 quotient = expand_unop (compute_mode, neg_optab,
3319 quotient, quotient, 0);
3322 else if (size <= HOST_BITS_PER_WIDE_INT)
3324 choose_multiplier (abs_d, size, size - 1,
3325 &ml, &post_shift, &lgup);
3326 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3328 rtx t1, t2, t3;
3330 extra_cost = (shift_cost[post_shift]
3331 + shift_cost[size - 1] + add_cost);
3332 t1 = expand_mult_highpart (compute_mode, op0, ml,
3333 NULL_RTX, 0,
3334 max_cost - extra_cost);
3335 if (t1 == 0)
3336 goto fail1;
3337 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3338 build_int_2 (post_shift, 0), NULL_RTX, 0);
3339 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3340 build_int_2 (size - 1, 0), NULL_RTX, 0);
3341 if (d < 0)
3342 quotient
3343 = force_operand (gen_rtx_MINUS (compute_mode,
3344 t3, t2),
3345 tquotient);
3346 else
3347 quotient
3348 = force_operand (gen_rtx_MINUS (compute_mode,
3349 t2, t3),
3350 tquotient);
3352 else
3354 rtx t1, t2, t3, t4;
3356 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3357 extra_cost = (shift_cost[post_shift]
3358 + shift_cost[size - 1] + 2 * add_cost);
3359 t1 = expand_mult_highpart (compute_mode, op0, ml,
3360 NULL_RTX, 0,
3361 max_cost - extra_cost);
3362 if (t1 == 0)
3363 goto fail1;
3364 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3365 t1, op0),
3366 NULL_RTX);
3367 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3368 build_int_2 (post_shift, 0),
3369 NULL_RTX, 0);
3370 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3371 build_int_2 (size - 1, 0),
3372 NULL_RTX, 0);
3373 if (d < 0)
3374 quotient
3375 = force_operand (gen_rtx_MINUS (compute_mode,
3376 t4, t3),
3377 tquotient);
3378 else
3379 quotient
3380 = force_operand (gen_rtx_MINUS (compute_mode,
3381 t3, t4),
3382 tquotient);
3385 else /* Mode too wide to use the tricky code */
3386 break;
3388 insn = get_last_insn ();
3389 if (insn != last
3390 && (set = single_set (insn)) != 0
3391 && SET_DEST (set) == quotient)
3392 set_unique_reg_note (insn,
3393 REG_EQUAL,
3394 gen_rtx_DIV (compute_mode, op0, op1));
3396 break;
3398 fail1:
3399 delete_insns_since (last);
3400 break;
3402 case FLOOR_DIV_EXPR:
3403 case FLOOR_MOD_EXPR:
3404 /* We will come here only for signed operations. */
3405 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3407 unsigned HOST_WIDE_INT mh, ml;
3408 int pre_shift, lgup, post_shift;
3409 HOST_WIDE_INT d = INTVAL (op1);
3411 if (d > 0)
3413 /* We could just as easily deal with negative constants here,
3414 but it does not seem worth the trouble for GCC 2.6. */
3415 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3417 pre_shift = floor_log2 (d);
3418 if (rem_flag)
3420 remainder = expand_binop (compute_mode, and_optab, op0,
3421 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3422 remainder, 0, OPTAB_LIB_WIDEN);
3423 if (remainder)
3424 return gen_lowpart (mode, remainder);
3426 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3427 build_int_2 (pre_shift, 0),
3428 tquotient, 0);
3430 else
3432 rtx t1, t2, t3, t4;
3434 mh = choose_multiplier (d, size, size - 1,
3435 &ml, &post_shift, &lgup);
3436 if (mh)
3437 abort ();
3439 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3440 build_int_2 (size - 1, 0), NULL_RTX, 0);
3441 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3442 NULL_RTX, 0, OPTAB_WIDEN);
3443 extra_cost = (shift_cost[post_shift]
3444 + shift_cost[size - 1] + 2 * add_cost);
3445 t3 = expand_mult_highpart (compute_mode, t2, ml,
3446 NULL_RTX, 1,
3447 max_cost - extra_cost);
3448 if (t3 != 0)
3450 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3451 build_int_2 (post_shift, 0),
3452 NULL_RTX, 1);
3453 quotient = expand_binop (compute_mode, xor_optab,
3454 t4, t1, tquotient, 0,
3455 OPTAB_WIDEN);
3459 else
3461 rtx nsign, t1, t2, t3, t4;
3462 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3463 op0, constm1_rtx), NULL_RTX);
3464 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3465 0, OPTAB_WIDEN);
3466 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3467 build_int_2 (size - 1, 0), NULL_RTX, 0);
3468 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3469 NULL_RTX);
3470 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3471 NULL_RTX, 0);
3472 if (t4)
3474 rtx t5;
3475 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3476 NULL_RTX, 0);
3477 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3478 t4, t5),
3479 tquotient);
3484 if (quotient != 0)
3485 break;
3486 delete_insns_since (last);
3488 /* Try using an instruction that produces both the quotient and
3489 remainder, using truncation. We can easily compensate the quotient
3490 or remainder to get floor rounding, once we have the remainder.
3491 Notice that we also compute the final remainder value here,
3492 and return the result right away. */
3493 if (target == 0 || GET_MODE (target) != compute_mode)
3494 target = gen_reg_rtx (compute_mode);
3496 if (rem_flag)
3498 remainder
3499 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3500 quotient = gen_reg_rtx (compute_mode);
3502 else
3504 quotient
3505 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3506 remainder = gen_reg_rtx (compute_mode);
3509 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3510 quotient, remainder, 0))
3512 /* This could be computed with a branch-less sequence.
3513 Save that for later. */
3514 rtx tem;
3515 rtx label = gen_label_rtx ();
3516 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3517 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3518 NULL_RTX, 0, OPTAB_WIDEN);
3519 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3520 expand_dec (quotient, const1_rtx);
3521 expand_inc (remainder, op1);
3522 emit_label (label);
3523 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3526 /* No luck with division elimination or divmod. Have to do it
3527 by conditionally adjusting op0 *and* the result. */
3529 rtx label1, label2, label3, label4, label5;
3530 rtx adjusted_op0;
3531 rtx tem;
3533 quotient = gen_reg_rtx (compute_mode);
3534 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3535 label1 = gen_label_rtx ();
3536 label2 = gen_label_rtx ();
3537 label3 = gen_label_rtx ();
3538 label4 = gen_label_rtx ();
3539 label5 = gen_label_rtx ();
3540 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3541 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3542 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3543 quotient, 0, OPTAB_LIB_WIDEN);
3544 if (tem != quotient)
3545 emit_move_insn (quotient, tem);
3546 emit_jump_insn (gen_jump (label5));
3547 emit_barrier ();
3548 emit_label (label1);
3549 expand_inc (adjusted_op0, const1_rtx);
3550 emit_jump_insn (gen_jump (label4));
3551 emit_barrier ();
3552 emit_label (label2);
3553 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3554 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3555 quotient, 0, OPTAB_LIB_WIDEN);
3556 if (tem != quotient)
3557 emit_move_insn (quotient, tem);
3558 emit_jump_insn (gen_jump (label5));
3559 emit_barrier ();
3560 emit_label (label3);
3561 expand_dec (adjusted_op0, const1_rtx);
3562 emit_label (label4);
3563 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3564 quotient, 0, OPTAB_LIB_WIDEN);
3565 if (tem != quotient)
3566 emit_move_insn (quotient, tem);
3567 expand_dec (quotient, const1_rtx);
3568 emit_label (label5);
3570 break;
3572 case CEIL_DIV_EXPR:
3573 case CEIL_MOD_EXPR:
3574 if (unsignedp)
3576 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3578 rtx t1, t2, t3;
3579 unsigned HOST_WIDE_INT d = INTVAL (op1);
3580 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3581 build_int_2 (floor_log2 (d), 0),
3582 tquotient, 1);
3583 t2 = expand_binop (compute_mode, and_optab, op0,
3584 GEN_INT (d - 1),
3585 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3586 t3 = gen_reg_rtx (compute_mode);
3587 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3588 compute_mode, 1, 1);
3589 if (t3 == 0)
3591 rtx lab;
3592 lab = gen_label_rtx ();
3593 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3594 expand_inc (t1, const1_rtx);
3595 emit_label (lab);
3596 quotient = t1;
3598 else
3599 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3600 t1, t3),
3601 tquotient);
3602 break;
3605 /* Try using an instruction that produces both the quotient and
3606 remainder, using truncation. We can easily compensate the
3607 quotient or remainder to get ceiling rounding, once we have the
3608 remainder. Notice that we also compute the final remainder
3609 value here, and return the result right away. */
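/* E.g. 13/4: truncation gives quotient 3, remainder 1; since the
   remainder is non-zero, quotient + 1 = 4 and remainder - op1 = -3
   give the ceiling result (13 == 4*4 - 3).  */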
3610 if (target == 0 || GET_MODE (target) != compute_mode)
3611 target = gen_reg_rtx (compute_mode);
3613 if (rem_flag)
3615 remainder = (GET_CODE (target) == REG
3616 ? target : gen_reg_rtx (compute_mode));
3617 quotient = gen_reg_rtx (compute_mode);
3619 else
3621 quotient = (GET_CODE (target) == REG
3622 ? target : gen_reg_rtx (compute_mode));
3623 remainder = gen_reg_rtx (compute_mode);
3626 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3627 remainder, 1))
3629 /* This could be computed with a branch-less sequence.
3630 Save that for later. */
3631 rtx label = gen_label_rtx ();
3632 do_cmp_and_jump (remainder, const0_rtx, EQ,
3633 compute_mode, label);
3634 expand_inc (quotient, const1_rtx);
3635 expand_dec (remainder, op1);
3636 emit_label (label);
3637 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3640 /* No luck with division elimination or divmod. Have to do it
3641 by conditionally adjusting op0 *and* the result. */
3643 rtx label1, label2;
3644 rtx adjusted_op0, tem;
3646 quotient = gen_reg_rtx (compute_mode);
3647 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3648 label1 = gen_label_rtx ();
3649 label2 = gen_label_rtx ();
3650 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3651 compute_mode, label1);
3652 emit_move_insn (quotient, const0_rtx);
3653 emit_jump_insn (gen_jump (label2));
3654 emit_barrier ();
3655 emit_label (label1);
3656 expand_dec (adjusted_op0, const1_rtx);
3657 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3658 quotient, 1, OPTAB_LIB_WIDEN);
3659 if (tem != quotient)
3660 emit_move_insn (quotient, tem);
3661 expand_inc (quotient, const1_rtx);
3662 emit_label (label2);
3665 else /* signed */
3667 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3668 && INTVAL (op1) >= 0)
3670 /* This is extremely similar to the code for the unsigned case
3671 above. For 2.7 we should merge these variants, but for
3672 2.6.1 I don't want to touch the code for unsigned since that
3673 gets used in C. The signed case will only be used by other
3674 languages (Ada). */
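/* The same identity holds for negative op0: the arithmetic shift
   computes the floor, and adding ((op0 & (d - 1)) != 0) turns the
   floor into the ceiling.  E.g. ceil(-5/4): -5 >> 2 = -2 and
   -5 & 3 = 3 is non-zero, so the quotient is -2 + 1 = -1.  */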
3676 rtx t1, t2, t3;
3677 unsigned HOST_WIDE_INT d = INTVAL (op1);
3678 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3679 build_int_2 (floor_log2 (d), 0),
3680 tquotient, 0);
3681 t2 = expand_binop (compute_mode, and_optab, op0,
3682 GEN_INT (d - 1),
3683 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3684 t3 = gen_reg_rtx (compute_mode);
3685 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3686 compute_mode, 1, 1);
3687 if (t3 == 0)
3689 rtx lab;
3690 lab = gen_label_rtx ();
3691 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3692 expand_inc (t1, const1_rtx);
3693 emit_label (lab);
3694 quotient = t1;
3696 else
3697 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3698 t1, t3),
3699 tquotient);
3700 break;
3703 /* Try using an instruction that produces both the quotient and
3704 remainder, using truncation. We can easily compensate the
3705 quotient or remainder to get ceiling rounding, once we have the
3706 remainder. Notice that we also compute the final remainder
3707 value here, and return the result right away. */
3708 if (target == 0 || GET_MODE (target) != compute_mode)
3709 target = gen_reg_rtx (compute_mode);
3710 if (rem_flag)
3712 remainder = (GET_CODE (target) == REG
3713 ? target : gen_reg_rtx (compute_mode));
3714 quotient = gen_reg_rtx (compute_mode);
3716 else
3718 quotient = (GET_CODE (target) == REG
3719 ? target : gen_reg_rtx (compute_mode));
3720 remainder = gen_reg_rtx (compute_mode);
3723 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3724 remainder, 0))
3726 /* This could be computed with a branch-less sequence.
3727 Save that for later. */
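/* When op0 and op1 have the same sign the truncated quotient is
   the floor, so a non-zero remainder means the ceiling is one
   more; when the signs differ (op0 ^ op1 is negative) truncation
   already rounds toward the ceiling, so we skip the adjustment.  */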
3728 rtx tem;
3729 rtx label = gen_label_rtx ();
3730 do_cmp_and_jump (remainder, const0_rtx, EQ,
3731 compute_mode, label);
3732 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3733 NULL_RTX, 0, OPTAB_WIDEN);
3734 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3735 expand_inc (quotient, const1_rtx);
3736 expand_dec (remainder, op1);
3737 emit_label (label);
3738 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3741 /* No luck with division elimination or divmod. Have to do it
3742 by conditionally adjusting op0 *and* the result. */
3744 rtx label1, label2, label3, label4, label5;
3745 rtx adjusted_op0;
3746 rtx tem;
3748 quotient = gen_reg_rtx (compute_mode);
3749 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3750 label1 = gen_label_rtx ();
3751 label2 = gen_label_rtx ();
3752 label3 = gen_label_rtx ();
3753 label4 = gen_label_rtx ();
3754 label5 = gen_label_rtx ();
3755 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3756 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3757 compute_mode, label1);
3758 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3759 quotient, 0, OPTAB_LIB_WIDEN);
3760 if (tem != quotient)
3761 emit_move_insn (quotient, tem);
3762 emit_jump_insn (gen_jump (label5));
3763 emit_barrier ();
3764 emit_label (label1);
3765 expand_dec (adjusted_op0, const1_rtx);
3766 emit_jump_insn (gen_jump (label4));
3767 emit_barrier ();
3768 emit_label (label2);
3769 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3770 compute_mode, label3);
3771 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3772 quotient, 0, OPTAB_LIB_WIDEN);
3773 if (tem != quotient)
3774 emit_move_insn (quotient, tem);
3775 emit_jump_insn (gen_jump (label5));
3776 emit_barrier ();
3777 emit_label (label3);
3778 expand_inc (adjusted_op0, const1_rtx);
3779 emit_label (label4);
3780 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3781 quotient, 0, OPTAB_LIB_WIDEN);
3782 if (tem != quotient)
3783 emit_move_insn (quotient, tem);
3784 expand_inc (quotient, const1_rtx);
3785 emit_label (label5);
3788 break;
3790 case EXACT_DIV_EXPR:
3791 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3793 HOST_WIDE_INT d = INTVAL (op1);
3794 unsigned HOST_WIDE_INT ml;
3795 int pre_shift;
3796 rtx t1;
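/* Shift out the power of 2 that op0 and op1 share, then multiply
   by the multiplicative inverse of the odd part of op1 modulo
   2^size.  For instance, with size == 32 and op1 == 6: pre_shift
   is 1 and invert_mod2n (3, 32) is 0xAAAAAAAB, because
   3 * 0xAAAAAAAB == 2^33 + 1 == 1 (mod 2^32); hence
   x / 6 == (x >> 1) * 0xAAAAAAAB (mod 2^32) whenever 6 divides
   x exactly.  */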
3798 pre_shift = floor_log2 (d & -d);
3799 ml = invert_mod2n (d >> pre_shift, size);
3800 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3801 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
3802 quotient = expand_mult (compute_mode, t1, GEN_INT (ml), NULL_RTX,
3803 0);
3805 insn = get_last_insn ();
3806 set_unique_reg_note (insn,
3807 REG_EQUAL,
3808 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3809 compute_mode,
3810 op0, op1));
3812 break;
3814 case ROUND_DIV_EXPR:
3815 case ROUND_MOD_EXPR:
3816 if (unsignedp)
3818 rtx tem;
3819 rtx label;
3820 label = gen_label_rtx ();
3821 quotient = gen_reg_rtx (compute_mode);
3822 remainder = gen_reg_rtx (compute_mode);
3823 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3825 rtx tem;
3826 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3827 quotient, 1, OPTAB_LIB_WIDEN);
3828 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3829 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3830 remainder, 1, OPTAB_LIB_WIDEN);
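/* Round to nearest: bump the quotient when the remainder exceeds
   floor((op1 - 1) / 2), i.e. when it is more than half of op1;
   ties round up.  E.g. 8/3: quotient 2, remainder 2 > (3 - 1)/2
   == 1, so the result becomes 3.  */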
3832 tem = plus_constant (op1, -1);
3833 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3834 build_int_2 (1, 0), NULL_RTX, 1);
3835 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3836 expand_inc (quotient, const1_rtx);
3837 expand_dec (remainder, op1);
3838 emit_label (label);
3840 else
3842 rtx abs_rem, abs_op1, tem, mask;
3843 rtx label;
3844 label = gen_label_rtx ();
3845 quotient = gen_reg_rtx (compute_mode);
3846 remainder = gen_reg_rtx (compute_mode);
3847 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3849 rtx tem;
3850 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3851 quotient, 0, OPTAB_LIB_WIDEN);
3852 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3853 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3854 remainder, 0, OPTAB_LIB_WIDEN);
3856 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
3857 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
3858 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3859 build_int_2 (1, 0), NULL_RTX, 1);
3860 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3861 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3862 NULL_RTX, 0, OPTAB_WIDEN);
3863 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3864 build_int_2 (size - 1, 0), NULL_RTX, 0);
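/* MASK is 0 when op0 and op1 have the same sign and -1 when they
   differ.  Thus (MASK ^ 1) - MASK below evaluates to +1 or -1,
   and (MASK ^ op1) - MASK to op1 or -op1, so the quotient is
   moved one step away from zero and the remainder pulled back by
   op1, both with the correct sign and without any branch.  */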
3865 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3866 NULL_RTX, 0, OPTAB_WIDEN);
3867 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3868 NULL_RTX, 0, OPTAB_WIDEN);
3869 expand_inc (quotient, tem);
3870 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3871 NULL_RTX, 0, OPTAB_WIDEN);
3872 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3873 NULL_RTX, 0, OPTAB_WIDEN);
3874 expand_dec (remainder, tem);
3875 emit_label (label);
3877 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3879 default:
3880 abort ();
3883 if (quotient == 0)
3885 if (target && GET_MODE (target) != compute_mode)
3886 target = 0;
3888 if (rem_flag)
3890 /* Try to produce the remainder without producing the quotient.
3891 If we seem to have a divmod pattern that does not require widening,
3892 don't try widening here. We should really have a WIDEN argument
3893 to expand_twoval_binop, since what we'd really like to do here is
3894 1) try a mod insn in compute_mode
3895 2) try a divmod insn in compute_mode
3896 3) try a div insn in compute_mode and multiply-subtract to get
3897 remainder
3898 4) try the same things with widening allowed. */
3899 remainder
3900 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3901 op0, op1, target,
3902 unsignedp,
3903 ((optab2->handlers[(int) compute_mode].insn_code
3904 != CODE_FOR_nothing)
3905 ? OPTAB_DIRECT : OPTAB_WIDEN));
3906 if (remainder == 0)
3908 /* No luck there. Can we do remainder and divide at once
3909 without a library call? */
3910 remainder = gen_reg_rtx (compute_mode);
3911 if (! expand_twoval_binop ((unsignedp
3912 ? udivmod_optab
3913 : sdivmod_optab),
3914 op0, op1,
3915 NULL_RTX, remainder, unsignedp))
3916 remainder = 0;
3919 if (remainder)
3920 return gen_lowpart (mode, remainder);
3923 /* Produce the quotient. Try a quotient insn, but not a library call.
3924 If we have a divmod in this mode, use it in preference to widening
3925 the div (for this test we assume it will not fail). Note that optab2
3926 is set to the one of the two optabs that the call below will use. */
3927 quotient
3928 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3929 op0, op1, rem_flag ? NULL_RTX : target,
3930 unsignedp,
3931 ((optab2->handlers[(int) compute_mode].insn_code
3932 != CODE_FOR_nothing)
3933 ? OPTAB_DIRECT : OPTAB_WIDEN));
3935 if (quotient == 0)
3937 /* No luck there. Try a quotient-and-remainder insn,
3938 keeping the quotient alone. */
3939 quotient = gen_reg_rtx (compute_mode);
3940 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3941 op0, op1,
3942 quotient, NULL_RTX, unsignedp))
3944 quotient = 0;
3945 if (! rem_flag)
3946 /* Still no luck. If we are not computing the remainder,
3947 use a library call for the quotient. */
3948 quotient = sign_expand_binop (compute_mode,
3949 udiv_optab, sdiv_optab,
3950 op0, op1, target,
3951 unsignedp, OPTAB_LIB_WIDEN);
3956 if (rem_flag)
3958 if (target && GET_MODE (target) != compute_mode)
3959 target = 0;
3961 if (quotient == 0)
3962 /* No divide instruction either. Use library for remainder. */
3963 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3964 op0, op1, target,
3965 unsignedp, OPTAB_LIB_WIDEN);
3966 else
3968 /* We divided. Now finish doing X - Y * (X / Y). */
3969 remainder = expand_mult (compute_mode, quotient, op1,
3970 NULL_RTX, unsignedp);
3971 remainder = expand_binop (compute_mode, sub_optab, op0,
3972 remainder, target, unsignedp,
3973 OPTAB_LIB_WIDEN);
3977 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3980 /* Return a tree node with data type TYPE, describing the value of X.
3981 Usually this is an RTL_EXPR, if there is no obvious better choice.
3982 X may be an expression, however we only support those expressions
3983 generated by loop.c. */
3985 tree
3986 make_tree (type, x)
3987 tree type;
3988 rtx x;
3990 tree t;
3992 switch (GET_CODE (x))
3994 case CONST_INT:
3995 t = build_int_2 (INTVAL (x),
3996 (TREE_UNSIGNED (type)
3997 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
3998 || INTVAL (x) >= 0 ? 0 : -1);
3999 TREE_TYPE (t) = type;
4000 return t;
4002 case CONST_DOUBLE:
4003 if (GET_MODE (x) == VOIDmode)
4005 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4006 TREE_TYPE (t) = type;
4008 else
4010 REAL_VALUE_TYPE d;
4012 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4013 t = build_real (type, d);
4016 return t;
4018 case PLUS:
4019 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4020 make_tree (type, XEXP (x, 1))));
4022 case MINUS:
4023 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4024 make_tree (type, XEXP (x, 1))));
4026 case NEG:
4027 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4029 case MULT:
4030 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4031 make_tree (type, XEXP (x, 1))));
4033 case ASHIFT:
4034 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4035 make_tree (type, XEXP (x, 1))));
4037 case LSHIFTRT:
4038 return fold (convert (type,
4039 build (RSHIFT_EXPR, unsigned_type (type),
4040 make_tree (unsigned_type (type),
4041 XEXP (x, 0)),
4042 make_tree (type, XEXP (x, 1)))));
4044 case ASHIFTRT:
4045 return fold (convert (type,
4046 build (RSHIFT_EXPR, signed_type (type),
4047 make_tree (signed_type (type), XEXP (x, 0)),
4048 make_tree (type, XEXP (x, 1)))));
4050 case DIV:
4051 if (TREE_CODE (type) != REAL_TYPE)
4052 t = signed_type (type);
4053 else
4054 t = type;
4056 return fold (convert (type,
4057 build (TRUNC_DIV_EXPR, t,
4058 make_tree (t, XEXP (x, 0)),
4059 make_tree (t, XEXP (x, 1)))));
4060 case UDIV:
4061 t = unsigned_type (type);
4062 return fold (convert (type,
4063 build (TRUNC_DIV_EXPR, t,
4064 make_tree (t, XEXP (x, 0)),
4065 make_tree (t, XEXP (x, 1)))));
4066 default:
4067 t = make_node (RTL_EXPR);
4068 TREE_TYPE (t) = type;
4069 RTL_EXPR_RTL (t) = x;
4070 /* There are no insns to be output
4071 when this rtl_expr is used. */
4072 RTL_EXPR_SEQUENCE (t) = 0;
4073 return t;
4077 /* Return an rtx representing the value of X * MULT + ADD.
4078 TARGET is a suggestion for where to store the result (an rtx).
4079 MODE is the machine mode for the computation.
4080 MULT must have mode MODE. ADD and X may have a different mode;
4081 X defaults to MODE.
4082 UNSIGNEDP is non-zero to do unsigned multiplication.
4083 This may emit insns. */
4085 rtx
4086 expand_mult_add (x, target, mult, add, mode, unsignedp)
4087 rtx x, target, mult, add;
4088 enum machine_mode mode;
4089 int unsignedp;
4091 tree type = type_for_mode (mode, unsignedp);
4092 tree add_type = (GET_MODE (add) == VOIDmode
4093 ? type : type_for_mode (GET_MODE (add), unsignedp));
4094 tree result = fold (build (PLUS_EXPR, type,
4095 fold (build (MULT_EXPR, type,
4096 make_tree (type, x),
4097 make_tree (type, mult))),
4098 make_tree (add_type, add)));
4100 return expand_expr (result, target, VOIDmode, 0);
4103 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4104 and returning TARGET.
4106 If TARGET is 0, a pseudo-register or constant is returned. */
4108 rtx
4109 expand_and (op0, op1, target)
4110 rtx op0, op1, target;
4112 enum machine_mode mode = VOIDmode;
4113 rtx tem;
4115 if (GET_MODE (op0) != VOIDmode)
4116 mode = GET_MODE (op0);
4117 else if (GET_MODE (op1) != VOIDmode)
4118 mode = GET_MODE (op1);
4120 if (mode != VOIDmode)
4121 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4122 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
4123 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
4124 else
4125 abort ();
4127 if (target == 0)
4128 target = tem;
4129 else if (tem != target)
4130 emit_move_insn (target, tem);
4131 return target;
4134 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4135 and storing in TARGET. Normally return TARGET.
4136 Return 0 if that cannot be done.
4138 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4139 it is VOIDmode, they cannot both be CONST_INT.
4141 UNSIGNEDP is for the case where we have to widen the operands
4142 to perform the operation. It says to use zero-extension.
4144 NORMALIZEP is 1 if we should convert the result to be either zero
4145 or one. NORMALIZEP is -1 if we should convert the result to be
4146 either zero or -1. If NORMALIZEP is zero, the result will be left
4147 "raw" out of the scc insn. */
4149 rtx
4150 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4151 rtx target;
4152 enum rtx_code code;
4153 rtx op0, op1;
4154 enum machine_mode mode;
4155 int unsignedp;
4156 int normalizep;
4158 rtx subtarget;
4159 enum insn_code icode;
4160 enum machine_mode compare_mode;
4161 enum machine_mode target_mode = GET_MODE (target);
4162 rtx tem;
4163 rtx last = get_last_insn ();
4164 rtx pattern, comparison;
4166 if (unsignedp)
4167 code = unsigned_condition (code);
4169 /* If one operand is constant, make it the second one. Only do this
4170 if the other operand is not constant as well. */
4172 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
4173 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
4175 tem = op0;
4176 op0 = op1;
4177 op1 = tem;
4178 code = swap_condition (code);
4181 if (mode == VOIDmode)
4182 mode = GET_MODE (op0);
4184 /* For some comparisons with 1 and -1, we can convert this to
4185 comparisons with zero. This will often produce more opportunities for
4186 store-flag insns. */
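/* E.g. A < 1 is the same as A <= 0, and (unsigned) A >= 1 the
   same as A != 0, since the operands are integers.  */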
4188 switch (code)
4190 case LT:
4191 if (op1 == const1_rtx)
4192 op1 = const0_rtx, code = LE;
4193 break;
4194 case LE:
4195 if (op1 == constm1_rtx)
4196 op1 = const0_rtx, code = LT;
4197 break;
4198 case GE:
4199 if (op1 == const1_rtx)
4200 op1 = const0_rtx, code = GT;
4201 break;
4202 case GT:
4203 if (op1 == constm1_rtx)
4204 op1 = const0_rtx, code = GE;
4205 break;
4206 case GEU:
4207 if (op1 == const1_rtx)
4208 op1 = const0_rtx, code = NE;
4209 break;
4210 case LTU:
4211 if (op1 == const1_rtx)
4212 op1 = const0_rtx, code = EQ;
4213 break;
4214 default:
4215 break;
4218 /* If we are comparing a double-word integer with zero, we can convert
4219 the comparison into one involving a single word. */
4220 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4221 && GET_MODE_CLASS (mode) == MODE_INT
4222 && op1 == const0_rtx)
4224 if (code == EQ || code == NE)
4226 /* Do a logical OR of the two words and compare the result. */
4227 rtx op0h = gen_highpart (word_mode, op0);
4228 rtx op0l = gen_lowpart (word_mode, op0);
4229 rtx op0both = expand_binop (word_mode, ior_optab, op0h, op0l,
4230 NULL_RTX, unsignedp, OPTAB_DIRECT);
4231 if (op0both != 0)
4232 return emit_store_flag (target, code, op0both, op1, word_mode,
4233 unsignedp, normalizep);
4235 else if (code == LT || code == GE)
4236 /* If testing the sign bit, can just test on high word. */
4237 return emit_store_flag (target, code, gen_highpart (word_mode, op0),
4238 op1, word_mode, unsignedp, normalizep);
4241 /* From now on, we won't change CODE, so set ICODE now. */
4242 icode = setcc_gen_code[(int) code];
4244 /* If this is A < 0 or A >= 0, we can do this by taking the ones
4245 complement of A (for GE) and shifting the sign bit to the low bit. */
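/* E.g. for a 32-bit A, (A < 0) is (unsigned) A >> 31 and
   (A >= 0) is (unsigned) ~A >> 31; with STORE_FLAG_VALUE == -1
   the arithmetic shift A >> 31 yields the -1/0 form directly.  */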
4246 if (op1 == const0_rtx && (code == LT || code == GE)
4247 && GET_MODE_CLASS (mode) == MODE_INT
4248 && (normalizep || STORE_FLAG_VALUE == 1
4249 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4250 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4251 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4253 subtarget = target;
4255 /* If the result is to be wider than OP0, it is best to convert it
4256 first. If it is to be narrower, it is *incorrect* to convert it
4257 first. */
4258 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4260 op0 = protect_from_queue (op0, 0);
4261 op0 = convert_modes (target_mode, mode, op0, 0);
4262 mode = target_mode;
4265 if (target_mode != mode)
4266 subtarget = 0;
4268 if (code == GE)
4269 op0 = expand_unop (mode, one_cmpl_optab, op0,
4270 ((STORE_FLAG_VALUE == 1 || normalizep)
4271 ? 0 : subtarget), 0);
4273 if (STORE_FLAG_VALUE == 1 || normalizep)
4274 /* If we are supposed to produce a 0/1 value, we want to do
4275 a logical shift from the sign bit to the low-order bit; for
4276 a -1/0 value, we do an arithmetic shift. */
4277 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4278 size_int (GET_MODE_BITSIZE (mode) - 1),
4279 subtarget, normalizep != -1);
4281 if (mode != target_mode)
4282 op0 = convert_modes (target_mode, mode, op0, 0);
4284 return op0;
4287 if (icode != CODE_FOR_nothing)
4289 insn_operand_predicate_fn pred;
4291 /* We think we may be able to do this with a scc insn. Emit the
4292 comparison and then the scc insn.
4294 compare_from_rtx may call emit_queue, which would be deleted below
4295 if the scc insn fails. So call it ourselves before setting LAST.
4296 Likewise for do_pending_stack_adjust. */
4298 emit_queue ();
4299 do_pending_stack_adjust ();
4300 last = get_last_insn ();
4302 comparison
4303 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4304 if (GET_CODE (comparison) == CONST_INT)
4305 return (comparison == const0_rtx ? const0_rtx
4306 : normalizep == 1 ? const1_rtx
4307 : normalizep == -1 ? constm1_rtx
4308 : const_true_rtx);
4310 /* If the code of COMPARISON doesn't match CODE, something is
4311 wrong; we can no longer be sure that we have the operation.
4312 We could handle this case, but it should not happen. */
4314 if (GET_CODE (comparison) != code)
4315 abort ();
4317 /* Get a reference to the target in the proper mode for this insn. */
4318 compare_mode = insn_data[(int) icode].operand[0].mode;
4319 subtarget = target;
4320 pred = insn_data[(int) icode].operand[0].predicate;
4321 if (preserve_subexpressions_p ()
4322 || ! (*pred) (subtarget, compare_mode))
4323 subtarget = gen_reg_rtx (compare_mode);
4325 pattern = GEN_FCN (icode) (subtarget);
4326 if (pattern)
4328 emit_insn (pattern);
4330 /* If we are converting to a wider mode, first convert to
4331 TARGET_MODE, then normalize. This produces better combining
4332 opportunities on machines that have a SIGN_EXTRACT when we are
4333 testing a single bit. This mostly benefits the 68k.
4335 If STORE_FLAG_VALUE does not have the sign bit set when
4336 interpreted in COMPARE_MODE, we can do this conversion as
4337 unsigned, which is usually more efficient. */
4338 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4340 convert_move (target, subtarget,
4341 (GET_MODE_BITSIZE (compare_mode)
4342 <= HOST_BITS_PER_WIDE_INT)
4343 && 0 == (STORE_FLAG_VALUE
4344 & ((HOST_WIDE_INT) 1
4345 << (GET_MODE_BITSIZE (compare_mode) - 1))));
4346 op0 = target;
4347 compare_mode = target_mode;
4349 else
4350 op0 = subtarget;
4352 /* If we want to keep subexpressions around, don't reuse our
4353 last target. */
4355 if (preserve_subexpressions_p ())
4356 subtarget = 0;
4358 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4359 we don't have to do anything. */
4360 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4362 /* STORE_FLAG_VALUE might be the most negative number, so write
4363 the comparison this way to avoid a compile-time warning. */
4364 else if (- normalizep == STORE_FLAG_VALUE)
4365 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4367 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4368 makes it hard to use a value of just the sign bit due to
4369 ANSI integer constant typing rules. */
4370 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4371 && (STORE_FLAG_VALUE
4372 & ((HOST_WIDE_INT) 1
4373 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4374 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4375 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4376 subtarget, normalizep == 1);
4377 else if (STORE_FLAG_VALUE & 1)
4379 op0 = expand_and (op0, const1_rtx, subtarget);
4380 if (normalizep == -1)
4381 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4383 else
4384 abort ();
4386 /* If we were converting to a smaller mode, do the
4387 conversion now. */
4388 if (target_mode != compare_mode)
4390 convert_move (target, op0, 0);
4391 return target;
4393 else
4394 return op0;
4398 delete_insns_since (last);
4400 /* If expensive optimizations, use different pseudo registers for each
4401 insn, instead of reusing the same pseudo. This leads to better CSE,
4402 but slows down the compiler, since there are more pseudos. */
4403 subtarget = (!flag_expensive_optimizations
4404 && (target_mode == mode)) ? target : NULL_RTX;
4406 /* If we reached here, we can't do this with a scc insn. However, there
4407 are some comparisons that can be done directly. For example, if
4408 this is an equality comparison of integers, we can try to exclusive-or
4409 (or subtract) the two operands and use a recursive call to try the
4410 comparison with zero. Don't do any of these cases if branches are
4411 very cheap. */
4413 if (BRANCH_COST > 0
4414 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4415 && op1 != const0_rtx)
4417 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4418 OPTAB_WIDEN);
4420 if (tem == 0)
4421 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4422 OPTAB_WIDEN);
4423 if (tem != 0)
4424 tem = emit_store_flag (target, code, tem, const0_rtx,
4425 mode, unsignedp, normalizep);
4426 if (tem == 0)
4427 delete_insns_since (last);
4428 return tem;
4431 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4432 the constant zero. Reject all other comparisons at this point. Only
4433 do LE and GT if branches are expensive since they are expensive on
4434 2-operand machines. */
4436 if (BRANCH_COST == 0
4437 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4438 || (code != EQ && code != NE
4439 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4440 return 0;
4442 /* See what we need to return. We can only return a 1, -1, or the
4443 sign bit. */
4445 if (normalizep == 0)
4447 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4448 normalizep = STORE_FLAG_VALUE;
4450 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4451 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4452 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4454 else
4455 return 0;
4458 /* Try to put the result of the comparison in the sign bit. Assume we can't
4459 do the necessary operation below. */
4461 tem = 0;
4463 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4464 the sign bit set. */
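/* E.g. A == 0: 0 | -1 == -1, sign bit set; A == 5: 5 | 4 == 5,
   sign bit clear; A == -3: -3 | -4 == -3, sign bit set.  */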
4466 if (code == LE)
4468 /* This is destructive, so SUBTARGET can't be OP0. */
4469 if (rtx_equal_p (subtarget, op0))
4470 subtarget = 0;
4472 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4473 OPTAB_WIDEN);
4474 if (tem)
4475 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4476 OPTAB_WIDEN);
4479 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4480 number of bits in the mode of OP0, minus one. */
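/* E.g. if A > 0, the arithmetic shift gives 0 and 0 - A is
   negative; if A == 0 the result is 0; if A < 0 the shift gives
   -1 and -1 - A >= 0.  So the sign bit is set exactly when
   A > 0.  */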
4482 if (code == GT)
4484 if (rtx_equal_p (subtarget, op0))
4485 subtarget = 0;
4487 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4488 size_int (GET_MODE_BITSIZE (mode) - 1),
4489 subtarget, 0);
4490 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4491 OPTAB_WIDEN);
4494 if (code == EQ || code == NE)
4496 /* For EQ or NE, one way to do the comparison is to apply an operation
4497 that converts the operand into a positive number if it is non-zero
4498 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4499 for NE we negate. This puts the result in the sign bit. Then we
4500 normalize with a shift, if needed.
4502 Two operations that can do the above actions are ABS and FFS, so try
4503 them. If that doesn't work, and MODE is smaller than a full word,
4504 we can use zero-extension to the wider mode (an unsigned conversion)
4505 as the operation. */
4507 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4508 that is compensated by the subsequent overflow when subtracting
4509 one / negating. */
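/* With TEM == abs (A) (or ffs (A)), TEM - 1 is negative exactly
   when A == 0, and -TEM is negative exactly when A != 0.  Even
   for A == INT_MIN, where abs (A) is INT_MIN, INT_MIN - 1 wraps
   to INT_MAX >= 0 and -INT_MIN is INT_MIN < 0, so the sign bit
   still comes out right.  */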
4511 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4512 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4513 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4514 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4515 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4517 op0 = protect_from_queue (op0, 0);
4518 tem = convert_modes (word_mode, mode, op0, 1);
4519 mode = word_mode;
4522 if (tem != 0)
4524 if (code == EQ)
4525 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4526 0, OPTAB_WIDEN);
4527 else
4528 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4531 /* If we couldn't do it that way, for NE we can "or" the two's complement
4532 of the value with itself. For EQ, we take the one's complement of
4533 that "or", which is an extra insn, so we only handle EQ if branches
4534 are expensive. */
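/* E.g. A == 0 gives 0 | 0 == 0 with the sign bit clear, while
   for any A != 0 at least one of A and -A is negative (and
   -INT_MIN is INT_MIN, also negative), so the IOR has its sign
   bit set.  */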
4536 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4538 if (rtx_equal_p (subtarget, op0))
4539 subtarget = 0;
4541 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4542 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4543 OPTAB_WIDEN);
4545 if (tem && code == EQ)
4546 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4550 if (tem && normalizep)
4551 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4552 size_int (GET_MODE_BITSIZE (mode) - 1),
4553 subtarget, normalizep == 1);
4555 if (tem)
4557 if (GET_MODE (tem) != target_mode)
4559 convert_move (target, tem, 0);
4560 tem = target;
4562 else if (!subtarget)
4564 emit_move_insn (target, tem);
4565 tem = target;
4568 else
4569 delete_insns_since (last);
4571 return tem;
4574 /* Like emit_store_flag, but always succeeds. */
4576 rtx
4577 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4578 rtx target;
4579 enum rtx_code code;
4580 rtx op0, op1;
4581 enum machine_mode mode;
4582 int unsignedp;
4583 int normalizep;
4585 rtx tem, label;
4587 /* First see if emit_store_flag can do the job. */
4588 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4589 if (tem != 0)
4590 return tem;
4592 if (normalizep == 0)
4593 normalizep = 1;
4595 /* If this failed, we have to do this with set/compare/jump/set code. */
4597 if (GET_CODE (target) != REG
4598 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4599 target = gen_reg_rtx (GET_MODE (target));
4601 emit_move_insn (target, const1_rtx);
4602 label = gen_label_rtx ();
4603 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, 0,
4604 NULL_RTX, label);
4606 emit_move_insn (target, const0_rtx);
4607 emit_label (label);
4609 return target;
4612 /* Perform a possibly multi-word comparison and a conditional jump to LABEL
4613 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4615 The algorithm is based on the code in expr.c:do_jump.
4617 Note that this does not perform a general comparison. Only variants
4618 generated within expmed.c are correctly handled; others abort (but could
4619 be handled if needed). */
4621 static void
4622 do_cmp_and_jump (arg1, arg2, op, mode, label)
4623 rtx arg1, arg2, label;
4624 enum rtx_code op;
4625 enum machine_mode mode;
4627 /* If this mode is an integer too wide to compare properly,
4628 compare word by word. Rely on cse to optimize constant cases. */
4630 if (GET_MODE_CLASS (mode) == MODE_INT
4631 && ! can_compare_p (op, mode, ccp_jump))
4633 rtx label2 = gen_label_rtx ();
4635 switch (op)
4637 case LTU:
4638 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4639 break;
4641 case LEU:
4642 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4643 break;
4645 case LT:
4646 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4647 break;
4649 case GT:
4650 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4651 break;
4653 case GE:
4654 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4655 break;
4657 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4658 those are the only equality operations we do. */
4659 case EQ:
4660 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4661 abort ();
4662 do_jump_by_parts_equality_rtx (arg1, label2, label);
4663 break;
4665 case NE:
4666 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4667 abort ();
4668 do_jump_by_parts_equality_rtx (arg1, label, label2);
4669 break;
4671 default:
4672 abort ();
4675 emit_label (label2);
4677 else
4679 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, 0, label);